From 6247dbbababec0d0b0755966f2c027108dc8df5a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 16 Nov 2023 04:34:10 +0000
Subject: [PATCH] Bump github.com/prometheus/prometheus from 0.44.0 to 0.47.2

Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.44.0 to 0.47.2.
- [Release notes](https://github.com/prometheus/prometheus/releases)
- [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/prometheus/compare/v0.44.0...v0.47.2)

---
updated-dependencies:
- dependency-name: github.com/prometheus/prometheus
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 105 +-
 go.sum | 227 +-
 .../go/compute/internal/version.go | 2 +-
 .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 611 +
 .../azure-sdk-for-go/sdk/azcore/LICENSE.txt | 21 +
 .../azure-sdk-for-go/sdk/azcore/README.md | 39 +
 .../Azure/azure-sdk-for-go/sdk/azcore/ci.yml | 29 +
 .../sdk/azcore/cloud/cloud.go | 44 +
 .../azure-sdk-for-go/sdk/azcore/cloud/doc.go | 53 +
 .../Azure/azure-sdk-for-go/sdk/azcore/core.go | 132 +
 .../Azure/azure-sdk-for-go/sdk/azcore/doc.go | 257 +
 .../azure-sdk-for-go/sdk/azcore/errors.go | 14 +
 .../Azure/azure-sdk-for-go/sdk/azcore/etag.go | 48 +
 .../sdk/azcore/internal/exported/exported.go | 67 +
 .../sdk/azcore/internal/exported/pipeline.go | 97 +
 .../sdk/azcore/internal/exported/request.go | 182 +
 .../internal/exported/response_error.go | 144 +
 .../sdk/azcore/internal/log/log.go | 38 +
 .../azcore/internal/pollers/async/async.go | 159 +
 .../sdk/azcore/internal/pollers/body/body.go | 135 +
 .../sdk/azcore/internal/pollers/loc/loc.go | 119 +
 .../sdk/azcore/internal/pollers/op/op.go | 145 +
 .../sdk/azcore/internal/pollers/poller.go | 24 +
 .../sdk/azcore/internal/pollers/util.go | 187 +
 .../sdk/azcore/internal/shared/constants.go | 36 +
 .../sdk/azcore/internal/shared/shared.go | 103 +
 .../azure-sdk-for-go/sdk/azcore/log/doc.go | 10 +
 .../azure-sdk-for-go/sdk/azcore/log/log.go | 50 +
 .../azure-sdk-for-go/sdk/azcore/policy/doc.go | 10 +
 .../sdk/azcore/policy/policy.go | 164 +
 .../sdk/azcore/runtime/doc.go | 10 +
 .../sdk/azcore/runtime/errors.go | 19 +
 .../sdk/azcore/runtime/pager.go | 77 +
 .../sdk/azcore/runtime/pipeline.go | 66 +
 .../sdk/azcore/runtime/policy_api_version.go | 75 +
 .../sdk/azcore/runtime/policy_bearer_token.go | 116 +
 .../azcore/runtime/policy_body_download.go | 72 +
 .../sdk/azcore/runtime/policy_http_header.go | 39 +
 .../azcore/runtime/policy_include_response.go | 34 +
 .../sdk/azcore/runtime/policy_logging.go | 263 +
 .../sdk/azcore/runtime/policy_request_id.go | 34 +
 .../sdk/azcore/runtime/policy_retry.go | 262 +
 .../sdk/azcore/runtime/policy_telemetry.go | 79 +
 .../sdk/azcore/runtime/poller.go | 327 +
 .../sdk/azcore/runtime/request.go | 248 +
 .../sdk/azcore/runtime/response.go | 135 +
 .../runtime/transport_default_http_client.go | 37 +
 .../sdk/azcore/streaming/doc.go | 9 +
 .../sdk/azcore/streaming/progress.go | 75 +
 .../sdk/azcore/tracing/constants.go | 41 +
 .../sdk/azcore/tracing/tracing.go | 168 +
 .../sdk/azidentity/CHANGELOG.md | 409 +
 .../sdk/azidentity/LICENSE.txt | 21 +
 .../sdk/azidentity/MIGRATION.md | 307 +
 .../azure-sdk-for-go/sdk/azidentity/README.md | 243 +
 .../sdk/azidentity/TROUBLESHOOTING.md | 205 +
 .../sdk/azidentity/assets.json | 6 +
 .../sdk/azidentity/azidentity.go | 190 +
 .../sdk/azidentity/azure_cli_credential.go | 180 +
.../azidentity/chained_token_credential.go | 138 + .../azure-sdk-for-go/sdk/azidentity/ci.yml | 47 + .../azidentity/client_assertion_credential.go | 83 + .../client_certificate_credential.go | 172 + .../azidentity/client_secret_credential.go | 75 + .../azidentity/default_azure_credential.go | 209 + .../sdk/azidentity/device_code_credential.go | 136 + .../sdk/azidentity/environment_credential.go | 164 + .../azure-sdk-for-go/sdk/azidentity/errors.go | 129 + .../interactive_browser_credential.go | 106 + .../sdk/azidentity/logging.go | 14 + .../sdk/azidentity/managed_identity_client.go | 388 + .../azidentity/managed_identity_credential.go | 127 + .../sdk/azidentity/on_behalf_of_credential.go | 99 + .../azure-sdk-for-go/sdk/azidentity/syncer.go | 130 + .../username_password_credential.go | 81 + .../sdk/azidentity/version.go | 15 + .../sdk/azidentity/workload_identity.go | 126 + .../azure-sdk-for-go/sdk/internal/LICENSE.txt | 21 + .../sdk/internal/diag/diag.go | 51 + .../azure-sdk-for-go/sdk/internal/diag/doc.go | 7 + .../sdk/internal/errorinfo/doc.go | 7 + .../sdk/internal/errorinfo/errorinfo.go | 16 + .../sdk/internal/exported/exported.go | 124 + .../azure-sdk-for-go/sdk/internal/log/doc.go | 7 + .../azure-sdk-for-go/sdk/internal/log/log.go | 104 + .../sdk/internal/poller/util.go | 155 + .../sdk/internal/temporal/resource.go | 123 + .../azure-sdk-for-go/sdk/internal/uuid/doc.go | 7 + .../sdk/internal/uuid/uuid.go | 76 + .../Azure/go-autorest/autorest/autorest.go | 32 +- .../Azure/go-autorest/autorest/azure/azure.go | 2 +- .../Azure/go-autorest/autorest/utility.go | 6 +- .../LICENSE | 21 + .../apps/cache/cache.go | 54 + .../apps/confidential/confidential.go | 685 + .../apps/errors/error_design.md | 111 + .../apps/errors/errors.go | 89 + .../apps/internal/base/base.go | 467 + .../internal/base/internal/storage/items.go | 200 + .../internal/storage/partitioned_storage.go | 436 + .../internal/base/internal/storage/storage.go | 517 + .../storage/test_serialized_cache.json | 56 + .../apps/internal/exported/exported.go | 34 + .../apps/internal/json/design.md | 140 + .../apps/internal/json/json.go | 184 + .../apps/internal/json/mapslice.go | 333 + .../apps/internal/json/marshal.go | 346 + .../apps/internal/json/struct.go | 290 + .../apps/internal/json/types/time/time.go | 70 + .../apps/internal/local/server.go | 177 + .../apps/internal/oauth/oauth.go | 353 + .../oauth/ops/accesstokens/accesstokens.go | 451 + .../oauth/ops/accesstokens/apptype_string.go | 25 + .../internal/oauth/ops/accesstokens/tokens.go | 335 + .../internal/oauth/ops/authority/authority.go | 552 + .../ops/authority/authorizetype_string.go | 30 + .../internal/oauth/ops/internal/comm/comm.go | 320 + .../oauth/ops/internal/comm/compress.go | 33 + .../oauth/ops/internal/grant/grant.go | 17 + .../apps/internal/oauth/ops/ops.go | 56 + .../ops/wstrust/defs/endpointtype_string.go | 25 + .../wstrust/defs/mex_document_definitions.go | 394 + .../defs/saml_assertion_definitions.go | 230 + .../oauth/ops/wstrust/defs/version_string.go | 25 + .../ops/wstrust/defs/wstrust_endpoint.go | 199 + .../ops/wstrust/defs/wstrust_mex_document.go | 159 + .../internal/oauth/ops/wstrust/wstrust.go | 136 + .../apps/internal/oauth/resolvers.go | 149 + .../apps/internal/options/options.go | 52 + .../apps/internal/shared/shared.go | 71 + .../apps/internal/version/version.go | 8 + .../apps/public/public.go | 683 + .../Microsoft/go-winio/.golangci.yml | 27 +- .../github.com/Microsoft/go-winio/hvsock.go | 6 +- .../Microsoft/go-winio/internal/fs/doc.go | 2 + 
.../Microsoft/go-winio/internal/fs/fs.go | 202 + .../go-winio/internal/fs/security.go | 12 + .../go-winio/internal/fs/zsyscall_windows.go | 64 + .../go-winio/internal/socket/socket.go | 4 +- .../go-winio/internal/stringbuffer/wstring.go | 132 + vendor/github.com/Microsoft/go-winio/pipe.go | 22 +- .../Microsoft/go-winio/zsyscall_windows.go | 19 - .../aws/aws-sdk-go/aws/auth/bearer/token.go | 50 + .../aws/credentials/ssocreds/provider.go | 75 +- .../credentials/ssocreds/sso_cached_token.go | 237 + .../credentials/ssocreds/token_provider.go | 139 + .../stscreds/assume_role_provider.go | 12 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1874 ++- .../aws/aws-sdk-go/aws/session/credentials.go | 33 +- .../aws/aws-sdk-go/aws/session/session.go | 2 +- .../aws-sdk-go/aws/session/shared_config.go | 168 +- .../aws/aws-sdk-go/aws/signer/v4/v4.go | 11 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws-sdk-go/private/protocol/rest/build.go | 4 + .../protocol/restjson/unmarshal_error.go | 135 +- .../aws/aws-sdk-go/service/ec2/api.go | 2234 +++- .../aws-sdk-go/service/ec2/customizations.go | 22 +- .../aws/aws-sdk-go/service/lightsail/api.go | 33 +- .../aws/aws-sdk-go/service/ssooidc/api.go | 1682 +++ .../aws/aws-sdk-go/service/ssooidc/doc.go | 66 + .../aws/aws-sdk-go/service/ssooidc/errors.go | 107 + .../aws/aws-sdk-go/service/ssooidc/service.go | 106 + .../aws/aws-sdk-go/service/sts/api.go | 135 +- .../aws/aws-sdk-go/service/sts/doc.go | 7 +- .../github.com/digitalocean/godo/CHANGELOG.md | 7 + vendor/github.com/digitalocean/godo/README.md | 2 +- vendor/github.com/digitalocean/godo/godo.go | 4 +- .../github.com/digitalocean/godo/registry.go | 21 + vendor/github.com/digitalocean/godo/tokens.go | 228 - .../emicklei/go-restful/v3/CHANGES.md | 5 + .../emicklei/go-restful/v3/README.md | 4 + .../emicklei/go-restful/v3/route_builder.go | 24 +- .../envoy/config/core/v3/health_check.pb.go | 638 +- .../core/v3/health_check.pb.validate.go | 34 + .../envoyproxy/protoc-gen-validate/NOTICE | 4 - .../github.com/fatih/color/color_windows.go | 19 + vendor/github.com/go-openapi/errors/api.go | 2 +- .../go-openapi/jsonpointer/pointer.go | 147 +- vendor/github.com/go-openapi/spec/info.go | 19 + .../github.com/go-openapi/spec/properties.go | 6 +- .../github.com/go-openapi/spec/responses.go | 27 +- vendor/github.com/go-openapi/swag/util.go | 16 +- .../github.com/gogo/protobuf/jsonpb/jsonpb.go | 1435 ++ .../github.com/google/pprof/profile/encode.go | 6 +- .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 8 + .../googleapis/gax-go/v2/callctx/callctx.go | 74 + .../github.com/googleapis/gax-go/v2/header.go | 49 + .../googleapis/gax-go/v2/internal/version.go | 2 +- .../gophercloud/gophercloud/CHANGELOG.md | 36 + .../gophercloud/gophercloud/errors.go | 48 + .../gophercloud/provider_client.go | 2 +- vendor/github.com/hashicorp/consul/api/acl.go | 3 + .../github.com/hashicorp/consul/api/agent.go | 34 +- vendor/github.com/hashicorp/consul/api/api.go | 16 + .../hashicorp/consul/api/catalog.go | 12 +- .../hashicorp/consul/api/config_entry.go | 82 +- .../consul/api/config_entry_discoverychain.go | 30 +- .../consul/api/config_entry_exports.go | 6 + .../consul/api/config_entry_gateways.go | 3 + .../api/config_entry_inline_certificate.go | 3 + .../consul/api/config_entry_intentions.go | 51 +- .../consul/api/config_entry_jwt_provider.go | 237 + .../hashicorp/consul/api/config_entry_mesh.go | 7 + .../consul/api/config_entry_rate_limit_ip.go | 91 + .../consul/api/config_entry_routes.go | 3 + 
.../consul/api/config_entry_sameness_group.go | 29 + .../consul/api/config_entry_status.go | 284 +- .../hashicorp/consul/api/connect.go | 6 + .../hashicorp/consul/api/connect_ca.go | 3 + .../hashicorp/consul/api/connect_intention.go | 7 + .../hashicorp/consul/api/coordinate.go | 3 + .../github.com/hashicorp/consul/api/debug.go | 3 + .../hashicorp/consul/api/discovery_chain.go | 4 + .../github.com/hashicorp/consul/api/event.go | 3 + .../github.com/hashicorp/consul/api/health.go | 3 + .../hashicorp/consul/api/internal.go | 64 + vendor/github.com/hashicorp/consul/api/kv.go | 3 + .../github.com/hashicorp/consul/api/lock.go | 3 + .../hashicorp/consul/api/namespace.go | 3 + .../hashicorp/consul/api/operator.go | 3 + .../hashicorp/consul/api/operator_area.go | 3 + .../consul/api/operator_autopilot.go | 3 + .../hashicorp/consul/api/operator_keyring.go | 3 + .../hashicorp/consul/api/operator_license.go | 3 + .../hashicorp/consul/api/operator_raft.go | 3 + .../hashicorp/consul/api/operator_segment.go | 3 + .../hashicorp/consul/api/operator_usage.go | 3 + .../hashicorp/consul/api/partition.go | 3 + .../hashicorp/consul/api/peering.go | 13 + .../hashicorp/consul/api/prepared_query.go | 21 +- vendor/github.com/hashicorp/consul/api/raw.go | 3 + .../hashicorp/consul/api/semaphore.go | 3 + .../hashicorp/consul/api/session.go | 3 + .../hashicorp/consul/api/snapshot.go | 3 + .../github.com/hashicorp/consul/api/status.go | 3 + vendor/github.com/hashicorp/consul/api/txn.go | 3 + vendor/github.com/hashicorp/cronexpr/LICENSE | 202 + vendor/github.com/hashicorp/go-hclog/LICENSE | 4 +- .../hashicorp/go-hclog/colorize_unix.go | 35 +- .../hashicorp/go-hclog/colorize_windows.go | 43 +- .../github.com/hashicorp/go-hclog/context.go | 3 + .../github.com/hashicorp/go-hclog/exclude.go | 3 + .../github.com/hashicorp/go-hclog/global.go | 3 + .../hashicorp/go-hclog/interceptlogger.go | 3 + .../hashicorp/go-hclog/intlogger.go | 31 +- .../github.com/hashicorp/go-hclog/logger.go | 17 + .../hashicorp/go-hclog/nulllogger.go | 3 + .../github.com/hashicorp/go-hclog/stdlog.go | 3 + .../github.com/hashicorp/go-hclog/writer.go | 3 + .../hashicorp/go-retryablehttp/CHANGELOG.md | 9 + .../hashicorp/go-retryablehttp/CODEOWNERS | 1 + .../hashicorp/go-retryablehttp/LICENSE | 2 + .../hashicorp/go-retryablehttp/client.go | 16 +- .../go-retryablehttp/roundtripper.go | 3 + .../hashicorp/nomad/api/allocations.go | 14 + vendor/github.com/hashicorp/nomad/api/api.go | 31 +- .../hashicorp/nomad/api/contexts/contexts.go | 1 + .../nomad/api/error_unexpected_response.go | 178 + .../hashicorp/nomad/api/event_stream.go | 12 + vendor/github.com/hashicorp/nomad/api/jobs.go | 16 +- .../hashicorp/nomad/api/namespace.go | 25 +- .../hashicorp/nomad/api/node_pools.go | 131 + .../github.com/hashicorp/nomad/api/nodes.go | 2 + .../hashicorp/nomad/api/operator.go | 12 +- .../hashicorp/nomad/api/services.go | 1 + .../github.com/hashicorp/nomad/api/tasks.go | 14 + .../hashicorp/nomad/api/variables.go | 39 - .../hcloud-go/hcloud/schema/server_type.go | 27 - .../hetznercloud/hcloud-go/hcloud/testing.go | 18 - .../hetznercloud/hcloud-go/{ => v2}/LICENSE | 0 .../hcloud-go/{ => v2}/hcloud/action.go | 44 +- .../hcloud-go/{ => v2}/hcloud/architecture.go | 0 .../hcloud-go/{ => v2}/hcloud/certificate.go | 38 +- .../hcloud-go/{ => v2}/hcloud/client.go | 7 +- .../hcloud-go/{ => v2}/hcloud/datacenter.go | 22 +- .../hcloud-go/v2/hcloud/deprecation.go | 59 + .../hcloud-go/{ => v2}/hcloud/error.go | 0 .../hcloud-go/{ => v2}/hcloud/firewall.go | 34 +- .../hcloud-go/{ => 
v2}/hcloud/floating_ip.go | 18 +- .../hcloud-go/{ => v2}/hcloud/hcloud.go | 2 +- .../hcloud-go/{ => v2}/hcloud/helper.go | 0 .../hcloud-go/{ => v2}/hcloud/image.go | 14 +- .../internal/instrumentation/metrics.go | 0 .../hcloud-go/{ => v2}/hcloud/iso.go | 26 +- .../hcloud-go/{ => v2}/hcloud/labels.go | 4 +- .../{ => v2}/hcloud/load_balancer.go | 40 +- .../{ => v2}/hcloud/load_balancer_type.go | 20 +- .../hcloud-go/{ => v2}/hcloud/location.go | 20 +- .../hcloud-go/{ => v2}/hcloud/network.go | 32 +- .../{ => v2}/hcloud/placement_group.go | 14 +- .../hcloud-go/{ => v2}/hcloud/pricing.go | 2 +- .../hcloud-go/{ => v2}/hcloud/primary_ip.go | 52 +- .../hcloud-go/{ => v2}/hcloud/rdns.go | 10 +- .../hcloud-go/{ => v2}/hcloud/resource.go | 2 +- .../hcloud-go/{ => v2}/hcloud/schema.go | 48 +- .../{ => v2}/hcloud/schema/action.go | 4 +- .../{ => v2}/hcloud/schema/certificate.go | 4 +- .../{ => v2}/hcloud/schema/datacenter.go | 6 +- .../hcloud-go/v2/hcloud/schema/deprecation.go | 12 + .../hcloud-go/{ => v2}/hcloud/schema/error.go | 0 .../{ => v2}/hcloud/schema/firewall.go | 4 +- .../{ => v2}/hcloud/schema/floating_ip.go | 8 +- .../hcloud-go/{ => v2}/hcloud/schema/image.go | 6 +- .../hcloud-go/{ => v2}/hcloud/schema/iso.go | 2 +- .../{ => v2}/hcloud/schema/load_balancer.go | 58 +- .../hcloud/schema/load_balancer_type.go | 2 +- .../{ => v2}/hcloud/schema/location.go | 2 +- .../hcloud-go/{ => v2}/hcloud/schema/meta.go | 0 .../{ => v2}/hcloud/schema/network.go | 39 +- .../{ => v2}/hcloud/schema/placement_group.go | 4 +- .../{ => v2}/hcloud/schema/pricing.go | 4 +- .../{ => v2}/hcloud/schema/primary_ip.go | 11 +- .../{ => v2}/hcloud/schema/server.go | 42 +- .../hcloud-go/v2/hcloud/schema/server_type.go | 29 + .../{ => v2}/hcloud/schema/ssh_key.go | 2 +- .../{ => v2}/hcloud/schema/volume.go | 8 +- .../hcloud-go/{ => v2}/hcloud/server.go | 20 +- .../hcloud-go/{ => v2}/hcloud/server_type.go | 25 +- .../hcloud-go/{ => v2}/hcloud/ssh_key.go | 12 +- .../hcloud-go/v2/hcloud/testing.go | 16 + .../hcloud-go/{ => v2}/hcloud/volume.go | 12 +- .../github.com/imdario/mergo/CONTRIBUTING.md | 112 + vendor/github.com/imdario/mergo/README.md | 25 +- vendor/github.com/imdario/mergo/SECURITY.md | 14 + vendor/github.com/imdario/mergo/map.go | 6 +- vendor/github.com/imdario/mergo/merge.go | 59 +- vendor/github.com/imdario/mergo/mergo.go | 11 +- .../ionos-cloud/sdk-go/v6/README.md | 3 +- .../ionos-cloud/sdk-go/v6/api_lans.go | 173 +- .../ionos-cloud/sdk-go/v6/client.go | 22 +- .../ionos-cloud/sdk-go/v6/configuration.go | 2 +- .../v6/model_application_load_balancer.go | 140 +- ...odel_application_load_balancer_entities.go | 3 +- ...plication_load_balancer_forwarding_rule.go | 157 +- ...oad_balancer_forwarding_rule_properties.go | 181 +- ...ation_load_balancer_forwarding_rule_put.go | 128 +- ...lication_load_balancer_forwarding_rules.go | 199 +- ...del_application_load_balancer_http_rule.go | 271 +- ...ation_load_balancer_http_rule_condition.go | 109 +- ...el_application_load_balancer_properties.go | 163 +- .../v6/model_application_load_balancer_put.go | 128 +- .../v6/model_application_load_balancers.go | 199 +- .../sdk-go/v6/model_attached_volumes.go | 199 +- .../sdk-go/v6/model_backup_unit.go | 157 +- .../sdk-go/v6/model_backup_unit_properties.go | 91 +- .../sdk-go/v6/model_backup_unit_sso.go | 3 +- .../sdk-go/v6/model_backup_units.go | 128 +- .../sdk-go/v6/model_balanced_nics.go | 199 +- .../ionos-cloud/sdk-go/v6/model_cdroms.go | 199 +- .../sdk-go/v6/model_connectable_datacenter.go | 65 +- 
.../ionos-cloud/sdk-go/v6/model_contract.go | 64 +- .../sdk-go/v6/model_contract_properties.go | 97 +- .../ionos-cloud/sdk-go/v6/model_contracts.go | 128 +- .../v6/model_cpu_architecture_properties.go | 12 +- .../sdk-go/v6/model_data_center_entities.go | 188 +- .../ionos-cloud/sdk-go/v6/model_datacenter.go | 140 +- .../v6/model_datacenter_element_metadata.go | 242 +- .../sdk-go/v6/model_datacenter_properties.go | 226 +- .../sdk-go/v6/model_datacenters.go | 199 +- .../ionos-cloud/sdk-go/v6/model_error.go | 6 +- .../sdk-go/v6/model_error_message.go | 6 +- .../sdk-go/v6/model_firewall_rule.go | 157 +- .../sdk-go/v6/model_firewall_rules.go | 199 +- .../v6/model_firewallrule_properties.go | 376 +- .../ionos-cloud/sdk-go/v6/model_flow_log.go | 157 +- .../sdk-go/v6/model_flow_log_properties.go | 108 +- .../sdk-go/v6/model_flow_log_put.go | 128 +- .../ionos-cloud/sdk-go/v6/model_flow_logs.go | 199 +- .../ionos-cloud/sdk-go/v6/model_group.go | 135 +- .../sdk-go/v6/model_group_entities.go | 62 +- .../sdk-go/v6/model_group_members.go | 128 +- .../sdk-go/v6/model_group_properties.go | 557 +- .../sdk-go/v6/model_group_share.go | 128 +- .../sdk-go/v6/model_group_share_properties.go | 6 +- .../sdk-go/v6/model_group_shares.go | 128 +- .../sdk-go/v6/model_group_users.go | 128 +- .../ionos-cloud/sdk-go/v6/model_groups.go | 128 +- .../ionos-cloud/sdk-go/v6/model_image.go | 157 +- .../sdk-go/v6/model_image_properties.go | 625 +- .../ionos-cloud/sdk-go/v6/model_images.go | 128 +- .../ionos-cloud/sdk-go/v6/model_info.go | 9 +- .../ionos-cloud/sdk-go/v6/model_ip_block.go | 157 +- .../sdk-go/v6/model_ip_block_properties.go | 129 +- .../ionos-cloud/sdk-go/v6/model_ip_blocks.go | 199 +- .../sdk-go/v6/model_ip_consumer.go | 275 +- .../sdk-go/v6/model_ip_failover.go | 6 +- .../v6/model_kubernetes_auto_scaling.go | 68 +- .../sdk-go/v6/model_kubernetes_cluster.go | 140 +- .../v6/model_kubernetes_cluster_entities.go | 3 +- .../v6/model_kubernetes_cluster_for_post.go | 140 +- .../v6/model_kubernetes_cluster_for_put.go | 140 +- .../v6/model_kubernetes_cluster_properties.go | 225 +- ..._kubernetes_cluster_properties_for_post.go | 79 +- ...l_kubernetes_cluster_properties_for_put.go | 79 +- .../sdk-go/v6/model_kubernetes_clusters.go | 128 +- .../v6/model_kubernetes_maintenance_window.go | 6 +- .../sdk-go/v6/model_kubernetes_node.go | 157 +- .../v6/model_kubernetes_node_metadata.go | 159 +- .../sdk-go/v6/model_kubernetes_node_pool.go | 157 +- .../v6/model_kubernetes_node_pool_for_post.go | 157 +- .../v6/model_kubernetes_node_pool_for_put.go | 157 +- .../v6/model_kubernetes_node_pool_lan.go | 70 +- .../model_kubernetes_node_pool_lan_routes.go | 64 +- .../model_kubernetes_node_pool_properties.go | 529 +- ...ubernetes_node_pool_properties_for_post.go | 544 +- ...kubernetes_node_pool_properties_for_put.go | 241 +- .../sdk-go/v6/model_kubernetes_node_pools.go | 128 +- .../v6/model_kubernetes_node_properties.go | 106 +- .../sdk-go/v6/model_kubernetes_nodes.go | 128 +- .../ionos-cloud/sdk-go/v6/model_label.go | 157 +- .../sdk-go/v6/model_label_properties.go | 79 +- .../sdk-go/v6/model_label_resource.go | 157 +- .../v6/model_label_resource_properties.go | 6 +- .../sdk-go/v6/model_label_resources.go | 199 +- .../ionos-cloud/sdk-go/v6/model_labels.go | 128 +- .../ionos-cloud/sdk-go/v6/model_lan.go | 140 +- .../sdk-go/v6/model_lan_entities.go | 3 +- .../ionos-cloud/sdk-go/v6/model_lan_nics.go | 199 +- .../ionos-cloud/sdk-go/v6/model_lan_post.go | 341 + .../sdk-go/v6/model_lan_properties.go | 121 +- 
.../sdk-go/v6/model_lan_properties_post.go | 305 + .../ionos-cloud/sdk-go/v6/model_lans.go | 199 +- .../sdk-go/v6/model_loadbalancer.go | 140 +- .../sdk-go/v6/model_loadbalancer_entities.go | 3 +- .../v6/model_loadbalancer_properties.go | 82 +- .../sdk-go/v6/model_loadbalancers.go | 199 +- .../ionos-cloud/sdk-go/v6/model_location.go | 157 +- .../sdk-go/v6/model_location_properties.go | 76 +- .../ionos-cloud/sdk-go/v6/model_locations.go | 128 +- .../sdk-go/v6/model_nat_gateway.go | 140 +- .../sdk-go/v6/model_nat_gateway_entities.go | 62 +- .../v6/model_nat_gateway_lan_properties.go | 64 +- .../sdk-go/v6/model_nat_gateway_properties.go | 91 +- .../sdk-go/v6/model_nat_gateway_put.go | 128 +- .../sdk-go/v6/model_nat_gateway_rule.go | 157 +- .../v6/model_nat_gateway_rule_properties.go | 147 +- .../sdk-go/v6/model_nat_gateway_rule_put.go | 128 +- .../sdk-go/v6/model_nat_gateway_rules.go | 128 +- .../sdk-go/v6/model_nat_gateways.go | 199 +- .../sdk-go/v6/model_network_load_balancer.go | 140 +- .../model_network_load_balancer_entities.go | 6 +- ...l_network_load_balancer_forwarding_rule.go | 157 +- ...d_balancer_forwarding_rule_health_check.go | 70 +- ...oad_balancer_forwarding_rule_properties.go | 177 +- ...twork_load_balancer_forwarding_rule_put.go | 128 +- ...rk_load_balancer_forwarding_rule_target.go | 96 +- ...cer_forwarding_rule_target_health_check.go | 9 +- ..._network_load_balancer_forwarding_rules.go | 199 +- .../model_network_load_balancer_properties.go | 163 +- .../v6/model_network_load_balancer_put.go | 128 +- .../sdk-go/v6/model_network_load_balancers.go | 199 +- .../ionos-cloud/sdk-go/v6/model_nic.go | 140 +- .../sdk-go/v6/model_nic_entities.go | 62 +- .../sdk-go/v6/model_nic_properties.go | 429 +- .../ionos-cloud/sdk-go/v6/model_nic_put.go | 128 +- .../ionos-cloud/sdk-go/v6/model_nics.go | 199 +- .../sdk-go/v6/model_no_state_meta_data.go | 239 +- .../sdk-go/v6/model_pagination_links.go | 91 +- .../ionos-cloud/sdk-go/v6/model_peer.go | 151 +- .../sdk-go/v6/model_private_cross_connect.go | 157 +- .../model_private_cross_connect_properties.go | 102 +- .../sdk-go/v6/model_private_cross_connects.go | 128 +- .../sdk-go/v6/model_remote_console_url.go | 3 +- .../ionos-cloud/sdk-go/v6/model_request.go | 157 +- .../sdk-go/v6/model_request_metadata.go | 84 +- .../sdk-go/v6/model_request_properties.go | 72 +- .../sdk-go/v6/model_request_status.go | 128 +- .../v6/model_request_status_metadata.go | 72 +- .../sdk-go/v6/model_request_target.go | 62 +- .../ionos-cloud/sdk-go/v6/model_requests.go | 205 +- .../ionos-cloud/sdk-go/v6/model_resource.go | 140 +- .../sdk-go/v6/model_resource_entities.go | 3 +- .../sdk-go/v6/model_resource_groups.go | 128 +- .../sdk-go/v6/model_resource_limits.go | 732 +- .../sdk-go/v6/model_resource_properties.go | 6 +- .../sdk-go/v6/model_resource_reference.go | 91 +- .../ionos-cloud/sdk-go/v6/model_resources.go | 128 +- .../sdk-go/v6/model_resources_users.go | 128 +- .../ionos-cloud/sdk-go/v6/model_s3_bucket.go | 3 +- .../ionos-cloud/sdk-go/v6/model_s3_key.go | 157 +- .../sdk-go/v6/model_s3_key_metadata.go | 78 +- .../sdk-go/v6/model_s3_key_properties.go | 64 +- .../ionos-cloud/sdk-go/v6/model_s3_keys.go | 128 +- .../sdk-go/v6/model_s3_object_storage_sso.go | 3 +- .../ionos-cloud/sdk-go/v6/model_server.go | 140 +- .../sdk-go/v6/model_server_entities.go | 65 +- .../sdk-go/v6/model_server_properties.go | 344 +- .../ionos-cloud/sdk-go/v6/model_servers.go | 199 +- .../ionos-cloud/sdk-go/v6/model_snapshot.go | 157 +- .../sdk-go/v6/model_snapshot_properties.go | 504 +- 
.../ionos-cloud/sdk-go/v6/model_snapshots.go | 128 +- .../sdk-go/v6/model_target_group.go | 157 +- .../v6/model_target_group_health_check.go | 67 +- .../model_target_group_http_health_check.go | 146 +- .../v6/model_target_group_properties.go | 194 +- .../sdk-go/v6/model_target_group_put.go | 128 +- .../sdk-go/v6/model_target_group_target.go | 155 +- .../sdk-go/v6/model_target_groups.go | 199 +- .../sdk-go/v6/model_target_port_range.go | 64 +- .../ionos-cloud/sdk-go/v6/model_template.go | 157 +- .../sdk-go/v6/model_template_properties.go | 74 +- .../ionos-cloud/sdk-go/v6/model_templates.go | 128 +- .../ionos-cloud/sdk-go/v6/model_token.go | 3 +- .../ionos-cloud/sdk-go/v6/model_user.go | 140 +- .../sdk-go/v6/model_user_metadata.go | 81 +- .../ionos-cloud/sdk-go/v6/model_user_post.go | 3 +- .../sdk-go/v6/model_user_properties.go | 184 +- .../sdk-go/v6/model_user_properties_post.go | 184 +- .../sdk-go/v6/model_user_properties_put.go | 232 +- .../ionos-cloud/sdk-go/v6/model_user_put.go | 6 +- .../ionos-cloud/sdk-go/v6/model_users.go | 199 +- .../sdk-go/v6/model_users_entities.go | 62 +- .../ionos-cloud/sdk-go/v6/model_volume.go | 157 +- .../sdk-go/v6/model_volume_properties.go | 731 +- .../ionos-cloud/sdk-go/v6/model_volumes.go | 199 +- .../github.com/ionos-cloud/sdk-go/v6/utils.go | 8 + .../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 32 + .../klauspost/compress/.goreleaser.yml | 141 + vendor/github.com/klauspost/compress/LICENSE | 304 + .../github.com/klauspost/compress/README.md | 642 + .../github.com/klauspost/compress/SECURITY.md | 25 + .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 168 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 682 + .../klauspost/compress/fse/decompress.go | 376 + .../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 229 + .../klauspost/compress/huff0/bitwriter.go | 103 + .../klauspost/compress/huff0/bytereader.go | 44 + .../klauspost/compress/huff0/compress.go | 749 ++ .../klauspost/compress/huff0/decompress.go | 1167 ++ .../compress/huff0/decompress_amd64.go | 226 + .../compress/huff0/decompress_amd64.s | 830 ++ .../compress/huff0/decompress_generic.go | 299 + .../klauspost/compress/huff0/huff0.go | 337 + .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../compress/internal/snapref/LICENSE | 27 + .../compress/internal/snapref/decode.go | 264 + .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 289 + .../compress/internal/snapref/encode_other.go | 250 + .../compress/internal/snapref/snappy.go | 98 + vendor/github.com/klauspost/compress/s2sx.mod | 4 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 + .../klauspost/compress/zstd/bitreader.go | 140 + .../klauspost/compress/zstd/bitwriter.go | 113 + .../klauspost/compress/zstd/blockdec.go | 726 + .../klauspost/compress/zstd/blockenc.go | 874 ++ .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 131 + .../klauspost/compress/zstd/bytereader.go | 82 + .../klauspost/compress/zstd/decodeheader.go | 229 + 
.../klauspost/compress/zstd/decoder.go | 948 ++ .../compress/zstd/decoder_options.go | 169 + .../klauspost/compress/zstd/dict.go | 161 + .../klauspost/compress/zstd/enc_base.go | 173 + .../klauspost/compress/zstd/enc_best.go | 530 + .../klauspost/compress/zstd/enc_better.go | 1242 ++ .../klauspost/compress/zstd/enc_dfast.go | 1123 ++ .../klauspost/compress/zstd/enc_fast.go | 891 ++ .../klauspost/compress/zstd/encoder.go | 624 + .../compress/zstd/encoder_options.go | 339 + .../klauspost/compress/zstd/framedec.go | 413 + .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 307 + .../compress/zstd/fse_decoder_amd64.go | 65 + .../compress/zstd/fse_decoder_amd64.s | 126 + .../compress/zstd/fse_decoder_generic.go | 72 + .../klauspost/compress/zstd/fse_encoder.go | 701 + .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 35 + .../klauspost/compress/zstd/history.go | 116 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 71 + .../compress/zstd/internal/xxhash/xxhash.go | 230 + .../zstd/internal/xxhash/xxhash_amd64.s | 210 + .../zstd/internal/xxhash/xxhash_arm64.s | 184 + .../zstd/internal/xxhash/xxhash_asm.go | 16 + .../zstd/internal/xxhash/xxhash_other.go | 76 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/matchlen_amd64.go | 16 + .../klauspost/compress/zstd/matchlen_amd64.s | 68 + .../compress/zstd/matchlen_generic.go | 33 + .../klauspost/compress/zstd/seqdec.go | 508 + .../klauspost/compress/zstd/seqdec_amd64.go | 394 + .../klauspost/compress/zstd/seqdec_amd64.s | 4175 ++++++ .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../klauspost/compress/zstd/seqenc.go | 114 + .../klauspost/compress/zstd/snappy.go | 435 + .../github.com/klauspost/compress/zstd/zip.go | 141 + .../klauspost/compress/zstd/zstd.go | 121 + vendor/github.com/kylelemons/godebug/LICENSE | 202 + .../kylelemons/godebug/diff/diff.go | 186 + .../kylelemons/godebug/pretty/.gitignore | 5 + .../kylelemons/godebug/pretty/doc.go | 25 + .../kylelemons/godebug/pretty/public.go | 188 + .../kylelemons/godebug/pretty/reflect.go | 241 + .../kylelemons/godebug/pretty/structure.go | 223 + .../github.com/linode/linodego/.golangci.yml | 9 + vendor/github.com/linode/linodego/CODEOWNERS | 1 + vendor/github.com/linode/linodego/Makefile | 5 +- .../linode/linodego/account_events.go | 1 + .../linode/linodego/account_oauth_client.go | 6 + .../linode/linodego/account_settings.go | 1 + .../linode/linodego/account_user_grants.go | 3 + .../linode/linodego/account_users.go | 38 +- vendor/github.com/linode/linodego/client.go | 28 +- .../github.com/linode/linodego/databases.go | 8 +- vendor/github.com/linode/linodego/go.work.sum | 104 +- vendor/github.com/linode/linodego/images.go | 35 +- .../linode/linodego/instance_ips.go | 5 + .../github.com/linode/linodego/instances.go | 41 +- vendor/github.com/linode/linodego/kernels.go | 2 + .../linode/linodego/lke_clusters.go | 2 + .../linode/linodego/lke_node_pools.go | 2 + .../linode/linodego/longview_subscriptions.go | 2 + vendor/github.com/linode/linodego/mongo.go | 324 - .../github.com/linode/linodego/network_ips.go | 9 + .../linode/linodego/network_pools.go | 2 + .../linode/linodego/network_ranges.go | 3 + .../linodego/object_storage_bucket_certs.go | 7 + .../linode/linodego/object_storage_buckets.go | 5 +- .../linodego/object_storage_clusters.go | 2 + .../linode/linodego/object_storage_object.go | 2 + .../github.com/linode/linodego/pagination.go | 
126 +- vendor/github.com/linode/linodego/regions.go | 3 +- vendor/github.com/linode/linodego/tags.go | 2 +- vendor/github.com/linode/linodego/types.go | 3 +- vendor/github.com/linode/linodego/waitfor.go | 136 +- vendor/github.com/miekg/dns/client.go | 53 +- vendor/github.com/miekg/dns/defaults.go | 20 +- vendor/github.com/miekg/dns/scan.go | 20 +- vendor/github.com/miekg/dns/scan_rr.go | 13 +- vendor/github.com/miekg/dns/singleinflight.go | 61 - vendor/github.com/miekg/dns/version.go | 2 +- vendor/github.com/pkg/browser/LICENSE | 23 + vendor/github.com/pkg/browser/README.md | 55 + vendor/github.com/pkg/browser/browser.go | 57 + .../github.com/pkg/browser/browser_darwin.go | 5 + .../github.com/pkg/browser/browser_freebsd.go | 14 + .../github.com/pkg/browser/browser_linux.go | 21 + .../github.com/pkg/browser/browser_netbsd.go | 14 + .../github.com/pkg/browser/browser_openbsd.go | 14 + .../pkg/browser/browser_unsupported.go | 12 + .../github.com/pkg/browser/browser_windows.go | 7 + .../prometheus/prometheus/config/config.go | 102 +- .../prometheus/discovery/consul/consul.go | 2 + .../prometheus/discovery/file/file.go | 4 +- .../prometheus/discovery/hetzner/hcloud.go | 4 +- .../prometheus/discovery/hetzner/hetzner.go | 20 +- .../prometheus/discovery/hetzner/robot.go | 2 +- .../discovery/kubernetes/endpoints.go | 54 +- .../discovery/kubernetes/endpointslice.go | 36 +- .../kubernetes/endpointslice_adaptor.go | 10 + .../discovery/kubernetes/ingress.go | 28 +- .../discovery/kubernetes/ingress_adaptor.go | 25 +- .../discovery/kubernetes/kubernetes.go | 56 +- .../prometheus/discovery/kubernetes/node.go | 25 +- .../prometheus/discovery/kubernetes/pod.go | 21 +- .../discovery/kubernetes/service.go | 37 +- .../prometheus/discovery/linode/linode.go | 6 +- .../prometheus/discovery/marathon/marathon.go | 18 +- .../discovery/openstack/instance.go | 11 +- .../model/histogram/float_histogram.go | 492 +- .../prometheus/model/histogram/generic.go | 14 +- .../prometheus/model/labels/labels.go | 2 +- ...abels_string.go => labels_stringlabels.go} | 123 +- .../prometheus/model/labels/regexp.go | 17 + .../prometheus/model/relabel/relabel.go | 6 +- .../prometheus/model/textparse/interface.go | 10 +- .../model/textparse/openmetricsparse.go | 8 +- .../model/textparse/protobufparse.go | 96 +- .../prometheus/notifier/notifier.go | 53 +- .../prometheus/prometheus/prompb/README.md | 4 +- .../prompb/io/prometheus/client/metrics.pb.go | 3 + .../prompb/io/prometheus/client/metrics.proto | 3 + .../prometheus/prometheus/promql/engine.go | 348 +- .../prometheus/prometheus/promql/functions.go | 128 +- .../prometheus/prometheus/promql/fuzz.go | 2 +- .../prometheus/promql/parser/functions.go | 14 +- .../promql/parser/generated_parser.y | 202 +- .../promql/parser/generated_parser.y.go | 1274 +- .../prometheus/promql/parser/lex.go | 156 +- .../prometheus/promql/parser/parse.go | 285 +- .../prometheus/prometheus/promql/quantile.go | 37 +- .../prometheus/promql/query_logger.go | 22 +- .../prometheus/prometheus/promql/test.go | 262 +- .../promql/testdata/aggregators.test | 515 + .../promql/testdata/at_modifier.test | 174 + .../prometheus/promql/testdata/collision.test | 22 + .../prometheus/promql/testdata/functions.test | 1039 ++ .../promql/testdata/histograms.test | 235 + .../prometheus/promql/testdata/literals.test | 59 + .../promql/testdata/native_histograms.test | 226 + .../prometheus/promql/testdata/operators.test | 489 + .../prometheus/promql/testdata/selectors.test | 207 + .../prometheus/promql/testdata/staleness.test | 51 + 
.../prometheus/promql/testdata/subquery.test | 117 + .../promql/testdata/trig_functions.test | 101 + .../prometheus/prometheus/promql/value.go | 2 +- .../prometheus/prometheus/rules/manager.go | 36 +- .../prometheus/scrape/clientprotobuf.go | 54 + .../prometheus/prometheus/scrape/manager.go | 25 +- .../prometheus/prometheus/scrape/scrape.go | 205 +- .../prometheus/prometheus/scrape/target.go | 37 +- .../prometheus/prometheus/storage/buffer.go | 30 +- .../prometheus/storage/memoized_iterator.go | 43 +- .../storage/remote/azuread/README.md | 8 + .../storage/remote/azuread/azuread.go | 247 + .../prometheus/storage/remote/client.go | 9 + .../prometheus/storage/remote/codec.go | 75 +- .../prometheus/normalize_label.go | 41 + .../prometheus/normalize_name.go | 251 + .../prometheusremotewrite/helper.go | 560 + .../prometheusremotewrite/histograms.go | 158 + .../prometheusremotewrite/metrics_to_prw.go | 114 + .../number_data_points.go | 104 + .../storage/remote/queue_manager.go | 5 + .../prometheus/storage/remote/read_handler.go | 6 +- .../prometheus/storage/remote/storage.go | 7 + .../prometheus/storage/remote/write.go | 1 + .../storage/remote/write_handler.go | 89 +- .../prometheus/prometheus/storage/series.go | 99 +- .../prometheus/tsdb/chunkenc/chunk.go | 30 +- .../tsdb/chunkenc/float_histogram.go | 179 +- .../prometheus/tsdb/chunkenc/histogram.go | 204 +- .../tsdb/chunkenc/histogram_meta.go | 12 + .../prometheus/tsdb/chunkenc/xor.go | 16 +- .../prometheus/tsdb/chunks/chunks.go | 77 +- .../prometheus/tsdb/chunks/head_chunks.go | 20 +- .../{tsdbutil/chunks.go => chunks/samples.go} | 62 +- .../prometheus/prometheus/tsdb/compact.go | 10 +- .../prometheus/prometheus/tsdb/db.go | 101 +- .../prometheus/prometheus/tsdb/exemplar.go | 16 +- .../prometheus/prometheus/tsdb/head.go | 197 +- .../prometheus/prometheus/tsdb/head_append.go | 462 +- .../prometheus/prometheus/tsdb/head_read.go | 134 +- .../prometheus/prometheus/tsdb/head_wal.go | 83 +- .../prometheus/prometheus/tsdb/index/index.go | 22 +- .../prometheus/tsdb/index/postings.go | 25 +- .../prometheus/tsdb/index/postingsstats.go | 7 +- .../prometheus/prometheus/tsdb/isolation.go | 4 +- .../prometheus/tsdb/ooo_head_read.go | 33 +- .../prometheus/prometheus/tsdb/querier.go | 105 +- .../prometheus/tsdb/record/record.go | 286 +- .../prometheus/tsdb/tombstones/tombstones.go | 32 +- .../prometheus/tsdb/tsdbutil/histogram.go | 22 +- .../prometheus/prometheus/tsdb/wal.go | 2 +- .../prometheus/tsdb/wlog/checkpoint.go | 8 +- .../prometheus/tsdb/wlog/live_reader.go | 53 +- .../prometheus/prometheus/tsdb/wlog/reader.go | 36 +- .../prometheus/tsdb/wlog/watcher.go | 102 +- .../prometheus/prometheus/tsdb/wlog/wlog.go | 102 +- .../prometheus/prometheus/util/stats/timer.go | 33 +- .../prometheus/prometheus/web/api/v1/api.go | 391 +- .../prometheus/prometheus/web/api/v1/codec.go | 53 + .../prometheus/web/api/v1/json_codec.go | 231 + .../api/baremetal/v1/baremetal_sdk.go | 20 +- .../api/instance/v1/instance_sdk.go | 869 +- .../api/instance/v1/instance_utils.go | 43 +- .../internal/auth/access_key.go | 21 + .../scaleway-sdk-go/internal/auth/auth.go | 15 + .../scaleway-sdk-go/internal/auth/jwt.go | 55 + .../internal/generic/fields.go | 17 + .../scaleway-sdk-go/internal/generic/ptr.go | 11 + .../scaleway/scaleway-sdk-go/scw/client.go | 16 +- .../scaleway-sdk-go/scw/client_option.go | 10 +- .../scaleway/scaleway-sdk-go/scw/config.go | 2 +- .../scaleway-sdk-go/scw/custom_types.go | 2 +- .../scaleway/scaleway-sdk-go/scw/errors.go | 3 +- 
.../scaleway/scaleway-sdk-go/scw/request.go | 23 - .../scaleway-sdk-go/scw/request_header.go | 32 + .../scw/request_header_wasm.go | 32 + .../scaleway/scaleway-sdk-go/scw/transport.go | 2 +- .../bson/bsoncodec/array_codec.go | 10 +- .../mongo-driver/bson/bsoncodec/bsoncodec.go | 156 +- .../bson/bsoncodec/byte_slice_codec.go | 20 +- .../bson/bsoncodec/default_value_decoders.go | 126 +- .../bson/bsoncodec/default_value_encoders.go | 122 +- .../mongo-driver/bson/bsoncodec/doc.go | 73 +- .../bson/bsoncodec/empty_interface_codec.go | 16 +- .../mongo-driver/bson/bsoncodec/map_codec.go | 36 +- .../bson/bsoncodec/pointer_codec.go | 6 + .../mongo-driver/bson/bsoncodec/registry.go | 456 +- .../bson/bsoncodec/slice_codec.go | 30 +- .../bson/bsoncodec/string_codec.go | 21 +- .../bson/bsoncodec/struct_codec.go | 157 +- .../bson/bsoncodec/struct_tag_parser.go | 11 +- .../mongo-driver/bson/bsoncodec/time_codec.go | 16 +- .../mongo-driver/bson/bsoncodec/uint_codec.go | 13 +- .../bsonoptions/byte_slice_codec_options.go | 11 + .../empty_interface_codec_options.go | 11 + .../bson/bsonoptions/map_codec_options.go | 15 + .../bson/bsonoptions/slice_codec_options.go | 11 + .../bson/bsonoptions/string_codec_options.go | 11 + .../bson/bsonoptions/struct_codec_options.go | 20 + .../bson/bsonoptions/time_codec_options.go | 11 + .../bson/bsonoptions/uint_codec_options.go | 11 + .../mongo-driver/bson/bsonrw/copier.go | 43 + .../bson/bsonrw/extjson_reader.go | 8 + .../bson/bsonrw/extjson_writer.go | 23 +- .../mongo-driver/bson/bsonrw/reader.go | 2 + .../mongo-driver/bson/bsonrw/value_reader.go | 8 + .../mongo-driver/bson/bsonrw/value_writer.go | 10 + .../mongo-driver/bson/bsonrw/writer.go | 9 + .../mongo-driver/bson/bsontype/bsontype.go | 9 +- .../mongo-driver/bson/decoder.go | 79 +- .../go.mongodb.org/mongo-driver/bson/doc.go | 5 +- .../mongo-driver/bson/encoder.go | 110 +- .../mongo-driver/bson/marshal.go | 193 +- .../mongo-driver/bson/primitive/decimal.go | 14 + .../mongo-driver/bson/primitive/objectid.go | 8 +- .../mongo-driver/bson/primitive/primitive.go | 46 +- .../mongo-driver/bson/primitive_codecs.go | 26 +- .../go.mongodb.org/mongo-driver/bson/raw.go | 23 +- .../mongo-driver/bson/raw_element.go | 7 +- .../mongo-driver/bson/raw_value.go | 13 +- .../mongo-driver/bson/registry.go | 15 +- .../go.mongodb.org/mongo-driver/bson/types.go | 15 +- .../mongo-driver/bson/unmarshal.go | 92 +- .../mongo-driver/x/bsonx/bsoncore/array.go | 10 +- .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 19 - .../mongo-driver/x/bsonx/bsoncore/doc.go | 29 + .../mongo-driver/x/bsonx/bsoncore/document.go | 10 +- .../mongo-driver/x/bsonx/bsoncore/element.go | 2 +- .../mongo-driver/x/bsonx/bsoncore/value.go | 23 +- .../collector/pdata/LICENSE | 202 + .../collector/pdata/internal/.gitignore | 2 + .../collector/pdata/internal/data/bytesid.go | 45 + .../collector/logs/v1/logs_service.pb.go | 844 ++ .../metrics/v1/metrics_service.pb.go | 844 ++ .../collector/trace/v1/trace_service.pb.go | 843 ++ .../data/protogen/common/v1/common.pb.go | 1721 +++ .../internal/data/protogen/logs/v1/logs.pb.go | 1762 +++ .../data/protogen/metrics/v1/metrics.pb.go | 6550 +++++++++ .../data/protogen/resource/v1/resource.pb.go | 381 + .../data/protogen/trace/v1/trace.pb.go | 2884 ++++ .../collector/pdata/internal/data/spanid.go | 79 + .../collector/pdata/internal/data/traceid.go | 79 + .../internal/generated_wrapper_byteslice.go | 19 + .../generated_wrapper_float64slice.go | 19 + .../generated_wrapper_instrumentationscope.go | 37 + 
.../internal/generated_wrapper_resource.go | 35 + .../internal/generated_wrapper_uint64slice.go | 19 + .../pdata/internal/json/attribute.go | 110 + .../collector/pdata/internal/json/enum.go | 29 + .../collector/pdata/internal/json/json.go | 22 + .../collector/pdata/internal/json/number.go | 103 + .../collector/pdata/internal/json/resource.go | 27 + .../collector/pdata/internal/json/scope.go | 31 + .../collector/pdata/internal/otlp/logs.go | 19 + .../collector/pdata/internal/otlp/metrics.go | 19 + .../collector/pdata/internal/otlp/traces.go | 19 + .../collector/pdata/internal/wrapper_logs.go | 35 + .../collector/pdata/internal/wrapper_map.go | 32 + .../pdata/internal/wrapper_metrics.go | 35 + .../collector/pdata/internal/wrapper_slice.go | 34 + .../pdata/internal/wrapper_traces.go | 35 + .../pdata/internal/wrapper_tracestate.go | 27 + .../collector/pdata/internal/wrapper_value.go | 31 + .../pdata/pcommon/generated_byteslice.go | 96 + .../pdata/pcommon/generated_float64slice.go | 96 + .../pcommon/generated_instrumentationscope.go | 87 + .../pdata/pcommon/generated_resource.go | 65 + .../pdata/pcommon/generated_uint64slice.go | 96 + .../collector/pdata/pcommon/map.go | 261 + .../collector/pdata/pcommon/slice.go | 155 + .../collector/pdata/pcommon/spanid.go | 36 + .../collector/pdata/pcommon/timestamp.go | 27 + .../collector/pdata/pcommon/trace_state.go | 41 + .../collector/pdata/pcommon/traceid.go | 37 + .../collector/pdata/pcommon/value.go | 465 + .../pdata/pmetric/aggregation_temporality.go | 34 + .../collector/pdata/pmetric/encoding.go | 30 + .../pdata/pmetric/exemplar_value_type.go | 27 + .../pdata/pmetric/generated_exemplar.go | 133 + .../pdata/pmetric/generated_exemplarslice.go | 128 + .../pmetric/generated_exponentialhistogram.go | 63 + ...generated_exponentialhistogramdatapoint.go | 215 + ...ed_exponentialhistogramdatapointbuckets.go | 64 + ...ated_exponentialhistogramdatapointslice.go | 143 + .../pdata/pmetric/generated_gauge.go | 51 + .../pdata/pmetric/generated_histogram.go | 62 + .../pmetric/generated_histogramdatapoint.go | 190 + .../generated_histogramdatapointslice.go | 143 + .../pdata/pmetric/generated_metric.go | 235 + .../pdata/pmetric/generated_metricslice.go | 143 + .../pmetric/generated_numberdatapoint.go | 135 + .../pmetric/generated_numberdatapointslice.go | 143 + .../pmetric/generated_resourcemetrics.go | 70 + .../pmetric/generated_resourcemetricsslice.go | 143 + .../pdata/pmetric/generated_scopemetrics.go | 70 + .../pmetric/generated_scopemetricsslice.go | 143 + .../collector/pdata/pmetric/generated_sum.go | 73 + .../pdata/pmetric/generated_summary.go | 51 + .../pmetric/generated_summarydatapoint.go | 114 + .../generated_summarydatapointslice.go | 143 + ...nerated_summarydatapointvalueatquantile.go | 67 + ...ed_summarydatapointvalueatquantileslice.go | 143 + .../collector/pdata/pmetric/json.go | 431 + .../pdata/pmetric/metric_data_point_flags.go | 28 + .../collector/pdata/pmetric/metric_type.go | 36 + .../collector/pdata/pmetric/metrics.go | 80 + .../pmetric/number_data_point_value_type.go | 27 + .../collector/pdata/pmetric/pb.go | 31 + .../generated_exportpartialsuccess.go | 67 + .../pdata/pmetric/pmetricotlp/grpc.go | 84 + .../pdata/pmetric/pmetricotlp/request.go | 66 + .../pdata/pmetric/pmetricotlp/response.go | 81 + .../collector/semconv/LICENSE | 202 + .../semconv/v1.6.1/generated_resource.go | 991 ++ .../semconv/v1.6.1/generated_trace.go | 1587 +++ .../collector/semconv/v1.6.1/nonstandard.go | 11 + .../collector/semconv/v1.6.1/schema.go | 9 + 
vendor/go.uber.org/multierr/CHANGELOG.md | 7 + vendor/go.uber.org/multierr/error.go | 32 +- .../go.uber.org/multierr/error_post_go120.go | 19 + .../go.uber.org/multierr/error_pre_go120.go | 20 + vendor/golang.org/x/exp/slices/slices.go | 52 +- vendor/golang.org/x/exp/slices/sort.go | 10 +- .../api/compute/v1/compute-api.json | 7259 ++++++++-- .../api/compute/v1/compute-gen.go | 3307 ++++- .../api/internal/gensupport/send.go | 21 + .../google.golang.org/api/internal/version.go | 2 +- .../admissionregistration/v1/generated.pb.go | 484 +- .../admissionregistration/v1/generated.proto | 73 + .../api/admissionregistration/v1/types.go | 73 + .../v1/types_swagger_doc_generated.go | 14 +- .../v1/zz_generated.deepcopy.go | 26 + .../v1alpha1/generated.pb.go | 1565 ++- .../v1alpha1/generated.proto | 228 +- .../admissionregistration/v1alpha1/types.go | 217 +- .../v1alpha1/types_swagger_doc_generated.go | 67 +- .../v1alpha1/zz_generated.deepcopy.go | 113 + .../v1beta1/generated.pb.go | 465 +- .../v1beta1/generated.proto | 73 + .../admissionregistration/v1beta1/types.go | 73 + .../v1beta1/types_swagger_doc_generated.go | 14 +- .../v1beta1/zz_generated.deepcopy.go | 26 + .../v1alpha1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/apps/v1/generated.proto | 5 +- vendor/k8s.io/api/apps/v1/types.go | 5 +- .../apps/v1/types_swagger_doc_generated.go | 10 +- .../k8s.io/api/apps/v1beta1/generated.proto | 57 +- vendor/k8s.io/api/apps/v1beta1/types.go | 57 +- .../v1beta1/types_swagger_doc_generated.go | 60 +- .../k8s.io/api/apps/v1beta2/generated.proto | 7 +- vendor/k8s.io/api/apps/v1beta2/types.go | 7 +- .../v1beta2/types_swagger_doc_generated.go | 12 +- .../v1/types_swagger_doc_generated.go | 2 +- .../authentication/v1alpha1/generated.proto | 3 +- .../api/authentication/v1alpha1/types.go | 5 +- .../v1alpha1/types_swagger_doc_generated.go | 4 +- .../zz_generated.prerelease-lifecycle.go | 6 +- .../authentication/v1beta1/generated.pb.go | 476 +- .../authentication/v1beta1/generated.proto | 21 + .../api/authentication/v1beta1/register.go | 1 + .../api/authentication/v1beta1/types.go | 27 + .../v1beta1/types_swagger_doc_generated.go | 21 +- .../v1beta1/zz_generated.deepcopy.go | 44 + .../zz_generated.prerelease-lifecycle.go | 18 + .../v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/autoscaling/v1/generated.proto | 42 +- vendor/k8s.io/api/autoscaling/v1/types.go | 79 +- .../v1/types_swagger_doc_generated.go | 42 +- .../k8s.io/api/autoscaling/v2/generated.proto | 20 +- vendor/k8s.io/api/autoscaling/v2/types.go | 61 +- .../v2/types_swagger_doc_generated.go | 22 +- .../api/autoscaling/v2beta1/generated.proto | 2 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 2 +- .../v2beta1/types_swagger_doc_generated.go | 4 +- .../api/autoscaling/v2beta2/generated.proto | 24 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 62 +- .../v2beta2/types_swagger_doc_generated.go | 26 +- vendor/k8s.io/api/batch/v1/generated.proto | 32 +- vendor/k8s.io/api/batch/v1/types.go | 46 +- .../batch/v1/types_swagger_doc_generated.go | 30 +- .../k8s.io/api/batch/v1beta1/generated.pb.go | 317 +- .../k8s.io/api/batch/v1beta1/generated.proto | 15 +- vendor/k8s.io/api/batch/v1beta1/register.go | 1 - vendor/k8s.io/api/batch/v1beta1/types.go | 20 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../batch/v1beta1/zz_generated.deepcopy.go | 27 - .../zz_generated.prerelease-lifecycle.go | 18 - vendor/k8s.io/api/certificates/v1/types.go | 3 +- .../v1/types_swagger_doc_generated.go | 2 +- 
.../k8s.io/api/certificates/v1alpha1/doc.go | 24 + .../api/certificates/v1alpha1/generated.pb.go | 831 ++ .../api/certificates/v1alpha1/generated.proto | 103 + .../api/certificates/v1alpha1/register.go | 61 + .../k8s.io/api/certificates/v1alpha1/types.go | 106 + .../v1alpha1/types_swagger_doc_generated.go | 60 + .../v1alpha1/zz_generated.deepcopy.go | 102 + .../zz_generated.prerelease-lifecycle.go | 58 + .../api/certificates/v1beta1/generated.proto | 6 +- .../k8s.io/api/certificates/v1beta1/types.go | 9 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../api/coordination/v1/generated.proto | 6 +- vendor/k8s.io/api/coordination/v1/types.go | 6 +- .../v1/types_swagger_doc_generated.go | 8 +- .../api/coordination/v1beta1/generated.proto | 6 +- .../k8s.io/api/coordination/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 8 +- .../api/core/v1/annotation_key_constants.go | 21 +- vendor/k8s.io/api/core/v1/generated.pb.go | 2800 ++-- vendor/k8s.io/api/core/v1/generated.proto | 150 +- vendor/k8s.io/api/core/v1/toleration.go | 14 +- vendor/k8s.io/api/core/v1/types.go | 229 +- .../core/v1/types_swagger_doc_generated.go | 69 +- .../api/core/v1/zz_generated.deepcopy.go | 40 +- .../k8s.io/api/discovery/v1/generated.proto | 26 +- vendor/k8s.io/api/discovery/v1/types.go | 42 +- .../v1/types_swagger_doc_generated.go | 14 +- .../api/discovery/v1beta1/generated.proto | 13 +- vendor/k8s.io/api/discovery/v1beta1/types.go | 30 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- .../events/v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 11046 +++++----------- .../api/extensions/v1beta1/generated.proto | 289 +- .../k8s.io/api/extensions/v1beta1/register.go | 2 - vendor/k8s.io/api/extensions/v1beta1/types.go | 385 +- .../v1beta1/types_swagger_doc_generated.go | 164 +- .../v1beta1/zz_generated.deepcopy.go | 366 - .../zz_generated.prerelease-lifecycle.go | 48 - .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../v1beta2/types_swagger_doc_generated.go | 2 +- .../v1beta3/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/networking/v1/generated.proto | 176 +- vendor/k8s.io/api/networking/v1/types.go | 188 +- .../v1/types_swagger_doc_generated.go | 120 +- .../api/networking/v1alpha1/generated.pb.go | 1199 +- .../api/networking/v1alpha1/generated.proto | 79 +- .../api/networking/v1alpha1/register.go | 12 +- .../k8s.io/api/networking/v1alpha1/types.go | 86 +- .../v1alpha1/types_swagger_doc_generated.go | 56 +- .../networking/v1alpha1/well_known_labels.go | 33 + .../v1alpha1/zz_generated.deepcopy.go | 97 + .../zz_generated.prerelease-lifecycle.go | 36 + .../api/networking/v1beta1/generated.proto | 78 +- vendor/k8s.io/api/networking/v1beta1/types.go | 87 +- .../v1beta1/types_swagger_doc_generated.go | 72 +- vendor/k8s.io/api/node/v1/generated.proto | 10 +- vendor/k8s.io/api/node/v1/types.go | 12 +- .../node/v1/types_swagger_doc_generated.go | 12 +- .../k8s.io/api/node/v1alpha1/generated.proto | 14 +- vendor/k8s.io/api/node/v1alpha1/types.go | 16 +- .../v1alpha1/types_swagger_doc_generated.go | 14 +- .../k8s.io/api/node/v1beta1/generated.proto | 12 +- vendor/k8s.io/api/node/v1beta1/types.go | 14 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- vendor/k8s.io/api/policy/v1/generated.proto | 4 +- vendor/k8s.io/api/policy/v1/types.go | 4 +- .../policy/v1/types_swagger_doc_generated.go | 4 +- .../k8s.io/api/policy/v1beta1/generated.proto | 4 +- 
vendor/k8s.io/api/policy/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../rbac/v1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../resource/{v1alpha1 => v1alpha2}/doc.go | 4 +- .../{v1alpha1 => v1alpha2}/generated.pb.go | 658 +- .../{v1alpha1 => v1alpha2}/generated.proto | 84 +- .../{v1alpha1 => v1alpha2}/register.go | 8 +- .../resource/{v1alpha1 => v1alpha2}/types.go | 92 +- .../types_swagger_doc_generated.go | 56 +- .../zz_generated.deepcopy.go | 61 +- .../k8s.io/api/scheduling/v1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1/types.go | 4 +- .../v1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1alpha1/generated.proto | 4 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 4 +- .../v1alpha1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1beta1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- vendor/k8s.io/api/storage/v1/generated.proto | 126 +- vendor/k8s.io/api/storage/v1/types.go | 132 +- .../storage/v1/types_swagger_doc_generated.go | 82 +- .../api/storage/v1alpha1/generated.proto | 38 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 41 +- .../v1alpha1/types_swagger_doc_generated.go | 38 +- .../api/storage/v1beta1/generated.proto | 112 +- vendor/k8s.io/api/storage/v1beta1/types.go | 119 +- .../v1beta1/types_swagger_doc_generated.go | 78 +- .../apimachinery/pkg/api/equality/semantic.go | 49 + .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 +- .../apimachinery/pkg/api/validation/doc.go | 18 + .../pkg/api/validation/generic.go | 88 + .../pkg/api/validation/objectmeta.go | 265 + .../pkg/apis/meta/internalversion/defaults.go | 38 + .../pkg/apis/meta/internalversion/types.go | 25 + .../zz_generated.conversion.go | 2 + .../internalversion/zz_generated.deepcopy.go | 5 + .../pkg/apis/meta/v1/generated.pb.go | 385 +- .../pkg/apis/meta/v1/generated.proto | 69 +- .../apimachinery/pkg/apis/meta/v1/types.go | 69 +- .../meta/v1/types_swagger_doc_generated.go | 25 +- .../pkg/apis/meta/v1/validation/validation.go | 320 + .../apis/meta/v1/zz_generated.conversion.go | 7 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 5 + .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 2 + .../apimachinery/pkg/labels/selector.go | 131 +- .../pkg/runtime/schema/group_version.go | 6 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 3 +- .../pkg/runtime/serializer/codec_factory.go | 3 +- .../serializer/versioning/versioning.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 2 +- .../apimachinery/pkg/types/namespacedname.go | 11 + .../apimachinery/pkg/util/errors/errors.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 2 +- .../pkg/util/managedfields/endpoints.yaml | 7018 ++++++++++ .../pkg/util/managedfields/fieldmanager.go | 57 + .../managedfields/internal/atmostevery.go | 60 + .../internal/buildmanagerinfo.go | 74 + .../managedfields/internal/capmanagers.go | 133 + .../util/managedfields/internal/conflict.go | 89 + .../managedfields/internal/fieldmanager.go | 206 + .../pkg/util/managedfields/internal/fields.go | 47 + .../managedfields/internal/lastapplied.go | 50 + .../internal/lastappliedmanager.go | 171 + .../internal/lastappliedupdater.go | 102 + .../managedfields/internal/managedfields.go | 248 + .../internal/managedfieldsupdater.go | 82 + .../util/managedfields/internal/manager.go | 52 + .../managedfields/internal/pathelement.go | 
140 + .../managedfields/internal/skipnonapplied.go | 91 + .../util/managedfields/internal/stripmeta.go | 90 + .../managedfields/internal/structuredmerge.go | 183 + .../managedfields/internal/typeconverter.go | 193 + .../internal/versionconverter.go | 123 + .../pkg/util/managedfields/node.yaml | 261 + .../pkg/util/managedfields/pod.yaml | 121 + .../pkg/util/managedfields/scalehandler.go | 174 + .../pkg/util/managedfields/typeconverter.go | 47 + .../k8s.io/apimachinery/pkg/util/sets/set.go | 14 + .../pkg/util/validation/validation.go | 8 +- .../apimachinery/pkg/util/wait/backoff.go | 502 + .../apimachinery/pkg/util/wait/delay.go | 51 + .../apimachinery/pkg/util/wait/error.go | 96 + .../k8s.io/apimachinery/pkg/util/wait/loop.go | 86 + .../k8s.io/apimachinery/pkg/util/wait/poll.go | 315 + .../apimachinery/pkg/util/wait/timer.go | 121 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 634 +- .../v1/matchcondition.go | 48 + .../v1/mutatingwebhook.go | 14 + .../v1/validatingwebhook.go | 14 + .../v1alpha1/admissionpolicyspec.go | 75 - .../v1alpha1/auditannotation.go | 48 + .../v1alpha1/expressionwarning.go | 48 + .../{paramsource.go => matchcondition.go} | 30 +- .../admissionregistration/v1alpha1/rule.go | 76 - .../v1alpha1/rulewithoperations.go | 85 - .../v1alpha1/typechecking.go | 44 + .../v1alpha1/validatingadmissionpolicy.go | 11 +- .../validatingadmissionpolicybindingspec.go | 21 +- .../v1alpha1/validatingadmissionpolicyspec.go | 28 + .../validatingadmissionpolicystatus.go | 66 + .../v1alpha1/validation.go | 15 +- .../v1beta1/matchcondition.go | 48 + .../v1beta1/mutatingwebhook.go | 14 + .../admissionregistration/v1beta1/rule.go | 76 - .../v1beta1/rulewithoperations.go | 85 - .../v1beta1/validatingwebhook.go | 14 + .../autoscaling/v2/podresourcemetricsource.go | 52 - .../v1alpha1/clustertrustbundle.go} | 95 +- .../v1alpha1/clustertrustbundlespec.go | 48 + .../applyconfigurations/core/v1/container.go | 58 +- .../core/v1/containerresizepolicy.go | 52 + .../core/v1/containerstatus.go | 40 +- .../core/v1/ephemeralcontainer.go | 13 + .../core/v1/ephemeralcontainercommon.go | 58 +- .../applyconfigurations/core/v1/podstatus.go | 9 + .../core/v1/servicespec.go | 8 +- .../extensions/v1beta1/allowedcsidriver.go | 39 - .../extensions/v1beta1/allowedflexvolume.go | 39 - .../extensions/v1beta1/allowedhostpath.go | 48 - .../v1beta1/fsgroupstrategyoptions.go | 57 - .../extensions/v1beta1/hostportrange.go | 48 - .../extensions/v1beta1/idrange.go | 48 - .../v1beta1/podsecuritypolicyspec.go | 285 - .../v1beta1/runasgroupstrategyoptions.go | 57 - .../v1beta1/runasuserstrategyoptions.go | 57 - .../v1beta1/runtimeclassstrategyoptions.go | 50 - .../v1beta1/selinuxstrategyoptions.go | 53 - .../supplementalgroupsstrategyoptions.go | 57 - .../applyconfigurations/internal/internal.go | 554 +- .../meta/v1/groupversionkind.go | 57 - .../applyconfigurations/meta/v1/listmeta.go | 66 - .../applyconfigurations/meta/v1/status.go | 142 - .../meta/v1/statuscause.go | 61 - .../meta/v1/statusdetails.go | 93 - .../v1alpha1/ipaddress.go} | 86 +- .../networking/v1alpha1/ipaddressspec.go | 39 + .../networking/v1alpha1/parentreference.go | 79 + .../allocationresult.go | 19 +- .../resource/v1alpha2/podschedulingcontext.go | 258 + .../podschedulingcontextspec.go} | 16 +- .../podschedulingcontextstatus.go} | 14 +- .../{v1alpha1 => v1alpha2}/resourceclaim.go | 16 +- .../resourceclaimconsumerreference.go | 2 +- .../resourceclaimparametersreference.go | 2 +- .../resourceclaimschedulingstatus.go | 2 +- .../resourceclaimspec.go | 8 +- 
.../resourceclaimstatus.go | 2 +- .../resourceclaimtemplate.go | 16 +- .../resourceclaimtemplatespec.go | 2 +- .../{v1alpha1 => v1alpha2}/resourceclass.go | 16 +- .../resourceclassparametersreference.go | 2 +- .../resource/v1alpha2/resourcehandle.go | 48 + .../discovery/aggregated_discovery.go | 102 +- .../client-go/discovery/discovery_client.go | 172 +- .../k8s.io/client-go/kubernetes/clientset.go | 29 +- vendor/k8s.io/client-go/kubernetes/doc.go | 7 +- .../client-go/kubernetes/scheme/register.go | 6 +- .../v1alpha1/validatingadmissionpolicy.go | 46 + .../v1beta1/authentication_client.go | 5 + .../v1beta1/generated_expansion.go | 2 + .../v1beta1/selfsubjectreview.go | 64 + .../v1alpha1/certificates_client.go | 107 + .../v1alpha1/clustertrustbundle.go | 197 + .../v1alpha1/doc.go | 0 .../v1alpha1/generated_expansion.go | 21 + .../typed/events/v1beta1/event_expansion.go | 3 +- .../extensions/v1beta1/extensions_client.go | 5 - .../extensions/v1beta1/generated_expansion.go | 2 - .../extensions/v1beta1/podsecuritypolicy.go | 197 - .../v1alpha1/generated_expansion.go | 2 + .../typed/networking/v1alpha1/ipaddress.go | 197 + .../networking/v1alpha1/networking_client.go | 5 + .../typed/resource/v1alpha1/podscheduling.go | 256 - .../kubernetes/typed/resource/v1alpha2/doc.go | 20 + .../generated_expansion.go | 4 +- .../resource/v1alpha2/podschedulingcontext.go | 256 + .../{v1alpha1 => v1alpha2}/resource_client.go | 46 +- .../{v1alpha1 => v1alpha2}/resourceclaim.go | 56 +- .../resourceclaimtemplate.go | 44 +- .../{v1alpha1 => v1alpha2}/resourceclass.go | 44 +- vendor/k8s.io/client-go/openapi/OWNERS | 4 + vendor/k8s.io/client-go/openapi/client.go | 7 +- .../k8s.io/client-go/openapi/groupversion.go | 42 +- vendor/k8s.io/client-go/pkg/version/base.go | 3 +- vendor/k8s.io/client-go/rest/client.go | 3 +- vendor/k8s.io/client-go/rest/request.go | 45 +- vendor/k8s.io/client-go/rest/with_retry.go | 18 +- .../client-go/tools/cache/controller.go | 92 +- .../client-go/tools/cache/delta_fifo.go | 142 +- vendor/k8s.io/client-go/tools/cache/fifo.go | 14 +- .../k8s.io/client-go/tools/cache/reflector.go | 477 +- .../client-go/tools/cache/shared_informer.go | 147 +- .../client-go/tools/cache/synctrack/lazy.go | 83 + .../tools/cache/synctrack/synctrack.go | 120 + .../k8s.io/client-go/tools/metrics/metrics.go | 17 + vendor/k8s.io/client-go/util/cert/cert.go | 2 +- .../util/workqueue/delaying_queue.go | 61 +- .../client-go/util/workqueue/metrics.go | 9 +- .../k8s.io/client-go/util/workqueue/queue.go | 53 +- .../util/workqueue/rate_limiting_queue.go | 61 +- vendor/k8s.io/klog/v2/format.go | 65 + .../klog/v2/internal/serialize/keyvalues.go | 47 +- vendor/k8s.io/klog/v2/k8s_references.go | 12 +- vendor/k8s.io/klog/v2/klog.go | 13 + .../k8s.io/kube-openapi/pkg/cached/cache.go | 308 + .../kube-openapi/pkg/handler3/handler.go | 217 +- .../pkg/internal/handler/handler_cache.go | 57 - vendor/k8s.io/utils/pointer/OWNERS | 10 + vendor/k8s.io/utils/pointer/README.md | 3 + vendor/k8s.io/utils/pointer/pointer.go | 410 + vendor/k8s.io/utils/trace/trace.go | 19 + vendor/modules.txt | 237 +- .../v4/merge/conflict.go | 121 + .../structured-merge-diff/v4/merge/update.go | 389 + .../v4/schema/elements.go | 3 +- .../v4/schema/schemaschema.go | 2 +- .../structured-merge-diff/v4/typed/merge.go | 11 +- .../structured-merge-diff/v4/typed/typed.go | 7 +- .../v4/typed/validate.go | 6 + .../v4/value/mapreflect.go | 2 +- .../v4/value/mapunstructured.go | 8 +- .../v4/value/reflectcache.go | 4 +- 1294 files changed, 151929 insertions(+), 36816 
deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go create mode 100644 
vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/doc.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/fs.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/security.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go delete mode 100644 vendor/github.com/digitalocean/godo/tokens.go delete mode 100644 
vendor/github.com/envoyproxy/protoc-gen-validate/NOTICE create mode 100644 vendor/github.com/fatih/color/color_windows.go create mode 100644 vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go create mode 100644 vendor/github.com/hashicorp/consul/api/internal.go create mode 100644 vendor/github.com/hashicorp/cronexpr/LICENSE create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS create mode 100644 vendor/github.com/hashicorp/nomad/api/error_unexpected_response.go create mode 100644 vendor/github.com/hashicorp/nomad/api/node_pools.go delete mode 100644 vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server_type.go delete mode 100644 vendor/github.com/hetznercloud/hcloud-go/hcloud/testing.go rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/LICENSE (100%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/action.go (89%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/architecture.go (100%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/certificate.go (92%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/client.go (98%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/datacenter.go (82%) create mode 100644 vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/deprecation.go rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/error.go (100%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/firewall.go (94%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/floating_ip.go (96%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/hcloud.go (72%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/helper.go (100%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/image.go (95%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/internal/instrumentation/metrics.go (100%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/iso.go (80%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/labels.go (87%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/load_balancer.go (97%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/load_balancer_type.go (86%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/location.go (84%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/network.go (92%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/placement_group.go (96%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/pricing.go (98%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/primary_ip.go (88%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/rdns.go (80%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/resource.go (89%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema.go (97%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/action.go (93%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/certificate.go (97%) rename vendor/github.com/hetznercloud/hcloud-go/{ => 
v2}/hcloud/schema/datacenter.go (84%) create mode 100644 vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/deprecation.go rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/error.go (100%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/firewall.go (98%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/floating_ip.go (95%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/image.go (95%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/iso.go (94%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/load_balancer.go (91%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/load_balancer_type.go (94%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/location.go (95%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/meta.go (100%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/network.go (75%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/placement_group.go (92%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/pricing.go (97%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/primary_ip.go (83%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/server.go (93%) create mode 100644 vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/server_type.go rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/ssh_key.go (97%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/schema/volume.go (95%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/server.go (98%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/server_type.go (83%) rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/ssh_key.go (95%) create mode 100644 vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/testing.go rename vendor/github.com/hetznercloud/hcloud-go/{ => v2}/hcloud/volume.go (97%) create mode 100644 vendor/github.com/imdario/mergo/CONTRIBUTING.md create mode 100644 vendor/github.com/imdario/mergo/SECURITY.md create mode 100644 vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_post.go create mode 100644 vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties_post.go create mode 100644 vendor/github.com/klauspost/compress/.gitattributes create mode 100644 vendor/github.com/klauspost/compress/.gitignore create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create 
mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create 
mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go create mode 100644 vendor/github.com/kylelemons/godebug/LICENSE create mode 100644 vendor/github.com/kylelemons/godebug/diff/diff.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/.gitignore create mode 100644 vendor/github.com/kylelemons/godebug/pretty/doc.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/public.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/reflect.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/structure.go create mode 100644 vendor/github.com/linode/linodego/CODEOWNERS delete mode 100644 vendor/github.com/linode/linodego/mongo.go delete mode 100644 vendor/github.com/miekg/dns/singleinflight.go create mode 100644 vendor/github.com/pkg/browser/LICENSE create mode 100644 vendor/github.com/pkg/browser/README.md create mode 100644 vendor/github.com/pkg/browser/browser.go create mode 100644 vendor/github.com/pkg/browser/browser_darwin.go create mode 100644 vendor/github.com/pkg/browser/browser_freebsd.go create mode 100644 vendor/github.com/pkg/browser/browser_linux.go create mode 100644 vendor/github.com/pkg/browser/browser_netbsd.go create mode 100644 vendor/github.com/pkg/browser/browser_openbsd.go create mode 100644 vendor/github.com/pkg/browser/browser_unsupported.go create mode 100644 vendor/github.com/pkg/browser/browser_windows.go rename vendor/github.com/prometheus/prometheus/model/labels/{labels_string.go => labels_stringlabels.go} (89%) create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/aggregators.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/at_modifier.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/collision.test create mode 100644 
vendor/github.com/prometheus/prometheus/promql/testdata/functions.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/histograms.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/literals.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/native_histograms.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/operators.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/selectors.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/staleness.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/subquery.test create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/trig_functions.test create mode 100644 vendor/github.com/prometheus/prometheus/scrape/clientprotobuf.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/azuread/README.md create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go rename vendor/github.com/prometheus/prometheus/tsdb/{tsdbutil/chunks.go => chunks/samples.go} (56%) create mode 100644 vendor/github.com/prometheus/prometheus/web/api/v1/codec.go create mode 100644 vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go create mode 100644 vendor/github.com/scaleway/scaleway-sdk-go/internal/auth/access_key.go create mode 100644 vendor/github.com/scaleway/scaleway-sdk-go/internal/auth/jwt.go create mode 100644 vendor/github.com/scaleway/scaleway-sdk-go/internal/generic/fields.go create mode 100644 vendor/github.com/scaleway/scaleway-sdk-go/internal/generic/ptr.go create mode 100644 vendor/github.com/scaleway/scaleway-sdk-go/scw/request_header.go create mode 100644 vendor/github.com/scaleway/scaleway-sdk-go/scw/request_header_wasm.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/.gitignore create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go create mode 100644 
vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/attribute.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/json.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/number.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/resource.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/scope.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go create mode 
100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/encoding.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/exemplar_value_type.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_data_point_flags.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_type.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/metrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/number_data_point_value_type.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go create mode 100644 
vendor/go.opentelemetry.io/collector/semconv/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_trace.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.6.1/nonstandard.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.6.1/schema.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/doc.go (84%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/generated.pb.go (83%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/generated.proto (79%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/register.go (95%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/types.go (81%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/types_swagger_doc_generated.go (76%) rename vendor/k8s.io/api/resource/{v1alpha1 => v1alpha2}/zz_generated.deepcopy.go (88%) create mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/generic.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go create mode 
100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/delay.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/error.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/loop.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/poll.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/timer.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go rename vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/{paramsource.go => matchcondition.go} (50%) delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go rename vendor/k8s.io/client-go/applyconfigurations/{resource/v1alpha1/podscheduling.go => certificates/v1alpha1/clustertrustbundle.go} (64%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go delete mode 100644 
vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/podsecuritypolicy.go => networking/v1alpha1/ipaddress.go} (66%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/allocationresult.go (76%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1/podschedulingspec.go => v1alpha2/podschedulingcontextspec.go} (67%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1/podschedulingstatus.go => v1alpha2/podschedulingcontextstatus.go} (64%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaim.go (96%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimconsumerreference.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimparametersreference.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimschedulingstatus.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimspec.go (93%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimstatus.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimtemplate.go (96%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclaimtemplatespec.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclass.go (96%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha1 => v1alpha2}/resourceclassparametersreference.go (99%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go
 rename vendor/k8s.io/client-go/kubernetes/typed/{resource => certificates}/v1alpha1/doc.go (100%)
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go
 rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/generated_expansion.go (92%)
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go
 rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/resource_client.go (66%)
 rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/resourceclaim.go (79%)
 rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/resourceclaimtemplate.go (80%)
 rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha1 => v1alpha2}/resourceclass.go (80%)
 create mode 100644 vendor/k8s.io/client-go/openapi/OWNERS
 create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go
 create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go
 create mode 100644 vendor/k8s.io/klog/v2/format.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/cached/cache.go
 delete mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go
 create mode 100644 vendor/k8s.io/utils/pointer/OWNERS
 create mode 100644 vendor/k8s.io/utils/pointer/README.md
 create mode 100644 vendor/k8s.io/utils/pointer/pointer.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go

diff --git a/go.mod b/go.mod
index 0a3df4ff7..670f381e3 100644
--- a/go.mod
+++ b/go.mod
@@ -23,7 +23,7 @@ require (
 	github.com/prometheus/client_golang v1.17.0
 	github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16
 	github.com/prometheus/common v0.45.0
-	github.com/prometheus/prometheus v0.44.0
+	github.com/prometheus/prometheus v0.47.2
 	go.uber.org/atomic v1.11.0
 	go.uber.org/zap v1.26.0
 	golang.org/x/net v0.17.0
@@ -35,42 +35,47 @@ require (
 )
 
 require (
-	cloud.google.com/go/compute v1.20.1 // indirect
+	cloud.google.com/go/compute v1.22.0 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a // indirect
 	collectd.org v0.5.0 // indirect
 	github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
-	github.com/Azure/go-autorest/autorest v0.11.28 // indirect
+	github.com/Azure/go-autorest/autorest v0.11.29 // indirect
 	github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
 	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
 	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
 	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
-	github.com/Microsoft/go-winio v0.6.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
+	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/apache/arrow/go/v11 v11.0.0 // indirect
+	github.com/apache/arrow/go/v12 v12.0.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go v1.44.245 // indirect
+	github.com/aws/aws-sdk-go v1.44.302 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dennwc/varint v1.0.0 // indirect
 	github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 // indirect
-	github.com/digitalocean/godo v1.98.0 // indirect
+	github.com/digitalocean/godo v1.99.0 // indirect
 	github.com/docker/distribution v2.8.2+incompatible // indirect
 	github.com/docker/docker v24.0.7+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/edsrzf/mmap-go v1.1.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
-	github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f // indirect
-	github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect
-	github.com/fatih/color v1.14.1 // indirect
+	github.com/emicklei/go-restful/v3 v3.10.2 // indirect
+	github.com/envoyproxy/go-control-plane v0.11.1 // indirect
+	github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect
+	github.com/fatih/color v1.15.0 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a // indirect
@@ -79,13 +84,13 @@ require (
 	github.com/go-logr/logr v1.2.4 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
-	github.com/go-openapi/errors v0.20.3 // indirect
-	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/go-openapi/errors v0.20.4 // indirect
+	github.com/go-openapi/jsonpointer v0.20.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/loads v0.21.2 // indirect
-	github.com/go-openapi/spec v0.20.8 // indirect
+	github.com/go-openapi/spec v0.20.9 // indirect
 	github.com/go-openapi/strfmt v0.21.7 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-openapi/swag v0.22.4 // indirect
 	github.com/go-openapi/validate v0.22.1 // indirect
 	github.com/go-resty/resty/v2 v2.7.0 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
@@ -99,43 +104,44 @@ require (
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/pprof v0.0.0-20230406165453-00490a63f317 // indirect
+	github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect
 	github.com/google/s2a-go v0.1.4 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
-	github.com/googleapis/gax-go/v2 v2.11.0 // indirect
-	github.com/gophercloud/gophercloud v1.3.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
+	github.com/gophercloud/gophercloud v1.5.0 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/hashicorp/consul/api v1.20.0 // indirect
-	github.com/hashicorp/cronexpr v1.1.1 // indirect
+	github.com/hashicorp/consul/api v1.22.0 // indirect
+	github.com/hashicorp/cronexpr v1.1.2 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-hclog v1.4.0 // indirect
+	github.com/hashicorp/go-hclog v1.5.0 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
-	github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 // indirect
+	github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
-	github.com/hetznercloud/hcloud-go v1.42.0 // indirect
-	github.com/imdario/mergo v0.3.13 // indirect
+	github.com/hetznercloud/hcloud-go/v2 v2.0.0 // indirect
+	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/influxdata/flux v0.194.1 // indirect
 	github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6 // indirect
-	github.com/ionos-cloud/sdk-go/v6 v6.1.6 // indirect
+	github.com/ionos-cloud/sdk-go/v6 v6.1.8 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/jsternberg/zap-logfmt v1.2.0 // indirect
 	github.com/julienschmidt/httprouter v1.3.0 // indirect
 	github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef // indirect
-	github.com/klauspost/compress v1.15.9 // indirect
+	github.com/klauspost/compress v1.16.7 // indirect
 	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect
-	github.com/linode/linodego v1.16.1 // indirect
+	github.com/kylelemons/godebug v1.1.0 // indirect
+	github.com/linode/linodego v1.19.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.17 // indirect
+	github.com/mattn/go-isatty v0.0.19 // indirect
 	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
-	github.com/miekg/dns v1.1.53 // indirect
+	github.com/miekg/dns v1.1.55 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -152,12 +158,13 @@ require (
 	github.com/ovh/go-ovh v1.4.1 // indirect
 	github.com/philhofer/fwd v1.0.0 // indirect
 	github.com/pierrec/lz4/v4 v4.1.15 // indirect
+	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/alertmanager v0.25.1 // indirect
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
 	github.com/prometheus/procfs v0.11.1 // indirect
-	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 // indirect
+	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 // indirect
 	github.com/sergi/go-diff v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stretchr/testify v1.8.4 // indirect
@@ -165,16 +172,18 @@ require (
 	github.com/vultr/govultr/v2 v2.17.2 // indirect
 	github.com/willf/bitset v1.1.10 // indirect
 	github.com/xlab/treeprint v1.1.0 // indirect
-	go.mongodb.org/mongo-driver v1.11.3 // indirect
+	go.mongodb.org/mongo-driver v1.12.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 // indirect
+	go.opentelemetry.io/collector/semconv v0.81.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect
 	go.opentelemetry.io/otel v1.18.0 // indirect
 	go.opentelemetry.io/otel/metric v1.18.0 // indirect
 	go.opentelemetry.io/otel/trace v1.18.0 // indirect
 	go.uber.org/goleak v1.2.1 // indirect
-	go.uber.org/multierr v1.10.0 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.14.0 // indirect
-	golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
+	golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
 	golang.org/x/mod v0.12.0 // indirect
 	golang.org/x/oauth2 v0.12.0 // indirect
 	golang.org/x/sync v0.3.0 // indirect
@@ -182,23 +191,23 @@ require (
 	golang.org/x/text v0.13.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.12.0 // indirect
-	google.golang.org/api v0.126.0 // indirect
+	google.golang.org/api v0.132.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
+	google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
-	k8s.io/api v0.26.2 // indirect
-	k8s.io/apimachinery v0.26.2 // indirect
-	k8s.io/client-go v0.26.2 // indirect
-	k8s.io/klog/v2 v2.90.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d // indirect
-	k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect
+	k8s.io/api v0.27.3 // indirect
+	k8s.io/apimachinery v0.27.3 // indirect
+	k8s.io/client-go v0.27.3 // indirect
+	k8s.io/klog/v2 v2.100.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 // indirect
+	k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
 )
 
 replace github.com/influxdata/influxdb => github.com/attack/influxdb v1.8.4-0.20230516101340-aeacf4fd10d8
diff --git a/go.sum b/go.sum
index d02eb5e46..b79ac561a 100644
--- a/go.sum
+++ b/go.sum
@@ -37,7 +37,7 @@ cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34h
 cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
 cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
 cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
-cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= +cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= @@ -88,7 +88,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/bigquery v1.50.0 h1:RscMV6LbnAmhAzD893Lv9nXXy2WCaJmbxYPWDLbGqNQ= +cloud.google.com/go/bigquery v1.52.0 h1:JKLNdxI0N+TIUWD6t9KN646X27N5dQWq9dZbbTWZ8hc= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/bigtable v1.10.1 h1:QKcRHeAsraxIlrdCZ3LLobXKBvITqcOEnSbHG2rzL9g= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= @@ -123,8 +123,8 @@ cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.22.0 h1:cB8R6FtUtT1TYGl5R3xuxnW6OUIc/DrT2aiR16TTG7Y= +cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -214,7 +214,7 @@ cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3Q cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= @@ -233,7 +233,7 @@ cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6 cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= 
+cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= @@ -414,14 +414,21 @@ filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmG github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= @@ -441,6 +448,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -458,8 +467,8 @@ github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0 github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -485,8 +494,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ= -github.com/apache/arrow/go/v11 v11.0.0 h1:hqauxvFQxww+0mEU/2XHG6LT7eZternCZq+A5Yly2uM= -github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/DiJbg= +github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw= github.com/apache/arrow/go/v7 v7.0.1 h1:WpCfq+AQxvXaI6/KplHE27MPMFx5av0o5NbPCTAGfy4= github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -509,8 +518,8 @@ github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2z github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.44.156/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.44.187/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= -github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk= +github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.11.0 h1:HxyD62DyNhCfiFGUHqJ/xITD6rAjJ7Dm/2nLxLmO4Ag= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 h1:yVUAwvJC/0WNPbyl0nA3j1L6CW1CN8wBubCRqtG7JLI= @@ -586,8 +595,9 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dave/jennifer v1.2.0/go.mod 
h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0= github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= @@ -598,8 +608,8 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgo github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.95.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= -github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ= -github.com/digitalocean/godo v1.98.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= +github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= @@ -615,8 +625,9 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -630,8 +641,8 @@ github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8E github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= 
+github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -644,13 +655,13 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -658,8 +669,8 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= @@ -714,12 +725,14 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod 
h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= @@ -732,8 +745,8 @@ github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0D github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= -github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= @@ -744,8 +757,9 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= @@ -902,8 +916,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= -github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ= -github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= +github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= @@ -916,8 +930,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99 github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -928,13 +942,13 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= -github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v1.1.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= -github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo= +github.com/gophercloud/gophercloud 
v1.5.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -955,13 +969,14 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4= -github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= -github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= +github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE= +github.com/hashicorp/consul/api v1.22.0/go.mod h1:zHpYgZ7TeYqS6zaszjwSt128OwESRpnhU9aGa6ue3Eg= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= -github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= -github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= +github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU= github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -973,21 +988,21 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= 
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= -github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -996,10 +1011,10 @@ github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjG github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -1014,16 +1029,16 @@ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/nomad/api v0.0.0-20230124213148-69fd1a0e4bf7/go.mod h1:xYYd4dybIhRhhzDemKx7Ddt8CvCosgrEek8YM7/cF0A= -github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A= -github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCrNvonL09r7EiQ6M2rNt+Cmjbn1QbzchFoTWJFpj4= +github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs= +github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.10.1 
h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go v1.39.0/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA= -github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A= -github.com/hetznercloud/hcloud-go v1.42.0/go.mod h1:YADL8AbmQYH0Eo+1lkuyoc8LutT0UeMvaKP47nNUb+Y= +github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= +github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -1034,8 +1049,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/flux v0.194.1 h1:px4QX33389KC4GGgkB4QjQHzBMEtGr3WBYYBmUq5fjM= @@ -1060,8 +1075,8 @@ github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68Z github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaFa4YD1Q+7bH9o5NCHQGPMqZCYJiNW6lIIS9z4= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/ionos-cloud/sdk-go/v6 v6.1.3/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME= -github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A= -github.com/ionos-cloud/sdk-go/v6 v6.1.6/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0= +github.com/ionos-cloud/sdk-go/v6 v6.1.8/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -1100,8 +1115,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.16.7 
h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -1122,13 +1137,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/linode/linodego v1.12.0/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk= -github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w= -github.com/linode/linodego v1.16.1/go.mod h1:aESRAbpLY9R6IA1WGAWHikRI9DU9Lhesapv1MhKmPHM= +github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw= +github.com/linode/linodego v1.19.0/go.mod h1:XZFR+yJ9mm2kwf6itZ6SCpu+6w3KnIevV0Uu5HNWJgQ= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -1156,8 +1172,9 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -1175,8 +1192,8 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= -github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= +github.com/miekg/dns 
v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= @@ -1296,6 +1313,7 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1306,8 +1324,9 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5 h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= @@ -1362,8 +1381,8 @@ github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/prometheus/prometheus v0.42.0/go.mod h1:Pfqb/MLnnR2KK+0vchiaH39jXxvLMBk+3lnIGP4N7Vk= -github.com/prometheus/prometheus v0.44.0 h1:sgn8Fdx+uE5tHQn0/622swlk2XnIj6udoZCnbVjHIgc= -github.com/prometheus/prometheus v0.44.0/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg= +github.com/prometheus/prometheus v0.47.2 h1:jWcnuQHz1o1Wu3MZ6nMJDuTI0kU5yJp9pkxh8XEkNvI= +github.com/prometheus/prometheus v0.47.2/go.mod h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -1372,14 +1391,14 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal 
v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= @@ -1389,7 +1408,7 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shoenig/test v0.6.0/go.mod h1:xYtyGBC5Q3kzCNyJg/SjgNpfAa2kvmgA0i5+lQso8x0= -github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c= +github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1398,8 +1417,8 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= @@ -1445,7 +1464,6 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= @@ -1464,8 +1482,10 @@ github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -1494,8 +1514,8 @@ go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4x go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.11.0/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= -go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= -go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= +go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1505,6 +1525,10 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4= +go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw= +go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= @@ -1546,8 +1570,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= -go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= @@ -1600,8 +1624,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1844,6 +1868,7 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2076,8 +2101,8 @@ google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= -google.golang.org/api 
v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc= +google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2209,12 +2234,12 @@ google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614G google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw= +google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM= +google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE= +google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -2309,7 +2334,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -2323,30 +2347,30 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt 
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= -k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= -k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= +k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= +k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg= k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= -k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= +k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= -k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= -k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= +k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= +k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= -k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= +k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRykOulHVHf0s0ZIFRjyt+UI= +k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= -k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod 
h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= @@ -2354,8 +2378,9 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index e939b9f5e..eddfee04b 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.20.1" +const Version = "1.22.0" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md new file mode 100644 index 000000000..8206a57c7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -0,0 +1,611 @@ +# Release History + +## 1.7.0 (2023-07-12) + +### Features Added +* Added method `WithClientName()` to type `azcore.Client` to support shallow cloning of a client with a new name used for tracing. + +### Breaking Changes +> These changes affect only code written against beta versions v1.7.0-beta.1 or v1.7.0-beta.2 +* The beta features for CAE, tracing, and fakes have been omitted for this release. + +## 1.7.0-beta.2 (2023-06-06) + +### Breaking Changes +> These changes affect only code written against beta version v1.7.0-beta.1 +* Method `SpanFromContext()` on type `tracing.Tracer` had the `bool` return value removed. + * This includes the field `SpanFromContext` in supporting type `tracing.TracerOptions`. +* Method `AddError()` has been removed from type `tracing.Span`. +* Method `Span.End()` now requires an argument of type `*tracing.SpanEndOptions`. + +## 1.6.1 (2023-06-06) + +### Bugs Fixed +* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry. + +### Other Changes +* This version contains all bug fixes from `v1.7.0-beta.1` + +## 1.7.0-beta.1 (2023-05-24) + +### Features Added +* Restored CAE support for ARM clients. +* Added supporting features to enable distributed tracing. + * Added func `runtime.StartSpan()` for use by SDKs to start spans. + * Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context. + * Added field `TracingNamespace` to `runtime.PipelineOptions`. + * Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types. 
+ * Added field `SpanFromContext` to `tracing.TracerOptions`. + * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`. + * Added supporting pipeline policies to include HTTP spans when creating clients. +* Added package `fake` to support generated fakes packages in SDKs. + * The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations. + * Added an internal fake poller implementation. + +### Bugs Fixed +* Retry policy always clones the underlying `*http.Request` before invoking the next policy. +* Added some non-standard error codes to the list of error codes for unregistered resource providers. + +## 1.6.0 (2023-05-04) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `TenantID` field to `policy.TokenRequestOptions`. + +## 1.5.0 (2023-04-06) + +### Features Added +* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry. + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +> These features will return in v1.6.0-beta.1. +* Removed `TokenRequestOptions.Claims` and `.TenantID` +* Removed ARM client support for CAE and cross-tenant auth. + +### Bugs Fixed +* Added non-conformant LRO terminal states `Cancelled` and `Completed`. + +### Other Changes +* Updated to latest `internal` module. + +## 1.5.0-beta.1 (2023-03-02) + +### Features Added +* This release includes the features added in v1.4.0-beta.1 + +## 1.4.0 (2023-03-02) +> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1. + +### Features Added +* Add `Clone()` method for `arm/policy.ClientOptions`. + +### Bugs Fixed +* ARM's RP registration policy will no longer swallow unrecognized errors. +* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`. +* Fixed wrong policy copy in `arm/runtime.NewPipeline()`. + +## 1.4.0-beta.1 (2023-02-02) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`. +* ARM bearer token policy handles CAE challenges. + +## 1.3.1 (2023-02-02) + +### Other Changes +* Update dependencies to latest versions. + +## 1.3.0 (2023-01-06) + +### Features Added +* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy` + with custom authorization logic +* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively. + +### Other Changes +* Updated `internal` module to latest version. +* `policy/Request.SetBody()` allows replacing a request's body with an empty one + +## 1.2.0 (2022-11-04) + +### Features Added +* Added `ClientOptions.APIVersion` field, which overrides the default version a client + requests of the service, if the client supports this (all ARM clients do). +* Added package `tracing` that contains the building blocks for distributed tracing. +* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. + +### Bugs Fixed +* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. 
+* Fixed the MaxRetryDelay default to be 60s. +* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. +* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. + +### Other Changes +* Retain contents of read-only fields when sending requests. + +## 1.1.4 (2022-10-06) + +### Bugs Fixed +* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. +* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string + +### Other Changes +* Removed logging URL from retry policy as it's redundant. +* Retry policy logs when it exits due to a non-retriable status code. + +## 1.1.3 (2022-09-01) + +### Bugs Fixed +* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. + +## 1.1.2 (2022-08-09) + +### Other Changes +* Fixed various doc bugs. + +## 1.1.1 (2022-06-30) + +### Bugs Fixed +* Avoid polling when a RELO LRO synchronously terminates. + +## 1.1.0 (2022-06-03) + +### Other Changes +* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests. + +## 1.0.0 (2022-05-12) + +### Features Added +* Added interface `runtime.PollingHandler` to support custom poller implementations. + * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`. + +### Breaking Changes +* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` +* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` +* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` +* Removed `TokenRequestOptions.TenantID` +* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` +* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` +* Removed `arm/runtime.FinalStateVia` and related `const` values +* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` +* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. +* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` +* `TokenCredential.GetToken` now returns `AccessToken` by value. + +### Bugs Fixed +* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. +* The `Operation-Location` poller now properly handles `final-state-via` values. +* Improvements in `runtime.Poller[T]` + * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. + * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. + +### Other Changes +* Updated to latest `internal` module and absorbed breaking changes. + * Use `temporal.Resource` and deleted copy. +* The internal poller implementation has been refactored. + * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. + * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. + * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. 
+* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` +* Default User-Agent headers no longer include `azcore` version information + +## 0.23.1 (2022-04-14) + +### Bugs Fixed +* Include XML header when marshalling XML content. +* Handle XML namespaces when searching for error code. +* Handle `odata.error` when searching for error code. + +## 0.23.0 (2022-04-04) + +### Features Added +* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. +* Added `cloud` package with a new API for cloud configuration +* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. + +### Breaking Changes +* Removed the `Poller` type-alias to the internal poller implementation. +* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. +* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. +* Replaced `arm.Endpoint` with `cloud` API + * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` + * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` +* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. + * Removed the `pollerID` parameter as it's no longer required. + * Created optional parameter structs and moved optional parameters into them. +* Changed `FinalStateVia` field to a `const` type. + +### Other Changes +* Converted expiring resource and dependent types to use generics. + +## 0.22.0 (2022-03-03) + +### Features Added +* Added header `WWW-Authenticate` to the default allow-list of headers for logging. +* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. + * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). + +### Breaking Changes +* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. + +## 0.21.1 (2022-02-04) + +### Bugs Fixed +* Restore response body after reading in `Poller.FinalResponse()`. (#16911) +* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) + +### Other Changes +* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789) + +## 0.21.0 (2022-01-11) + +### Features Added +* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger. +* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received. 
+ +### Breaking Changes +* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions` +* Renamed `arm/ClientOptions.Host` to `.Endpoint` +* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload` +* Removed `azcore.HTTPResponse` interface type +* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter +* `runtime.NewResponseError()` no longer requires an `error` parameter + +## 0.20.0 (2021-10-22) + +### Breaking Changes +* Removed `arm.Connection` +* Removed `azcore.Credential` and `.NewAnonymousCredential()` + * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential` +* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication +* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions` +* Contents in the `log` package have been slightly renamed. +* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions` +* Changed parameters for `NewBearerTokenPolicy()` +* Moved policy config options out of `arm/runtime` and into `arm/policy` + +### Features Added +* Updating Documentation +* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints +* `azcore.ClientOptions` contains common pipeline configuration settings +* Added support for multi-tenant authorization in `arm/runtime` +* Require one second minimum when calling `PollUntilDone()` + +### Bug Fixes +* Fixed a potential panic when creating the default Transporter. +* Close LRO initial response body when creating a poller. +* Fixed a panic when recursively cloning structs that contain time.Time. + +## 0.19.0 (2021-08-25) + +### Breaking Changes +* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors). + * `azcore` has all core functionality. + * `log` contains facilities for configuring in-box logging. + * `policy` is used for configuring pipeline options and creating custom pipeline policies. + * `runtime` contains various helpers used by SDK authors and generated content. + * `streaming` has helpers for streaming IO operations. +* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed. + * As a result, the `Request.Telemetry()` method has been removed. +* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it. +* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it. +* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively. + +### Bug Fixes +* Fixed an issue in the retry policy where the request body could be overwritten after a rewind. + +### Other Changes +* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively. + * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy. +* Poller logic has been consolidated across ARM and core implementations. + * This required some changes to the internal interfaces for core pollers. +* The core poller types have been improved, including more logging and test coverage. 
+ +## 0.18.1 (2021-08-20) + +### Features Added +* Adds an `ETag` type for comparing etags and handling etags on requests +* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object + +### Bugs Fixed +* `JoinPaths` will preserve query parameters encoded in the `root` url. + +### Other Changes +* Bumps dependency on `internal` module to the latest version (v0.7.0) + +## 0.18.0 (2021-07-29) +### Features Added +* Replaces methods from Logger type with two package methods for interacting with the logging functionality. +* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` +* `azcore.SetListener` replaces `azcore.Logger().SetListener` + +### Breaking Changes +* Removes `Logger` type from `azcore` + + +## 0.17.0 (2021-07-27) +### Features Added +* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) +* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) + +### Breaking Changes +* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) +* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) +* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) + + +## 0.16.2 (2021-05-26) +### Features Added +* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) + + +## 0.16.1 (2021-05-19) +### Features Added +* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) + + +## 0.16.0 (2021-05-07) +### Features Added +* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) + + +## 0.15.1 (2021-05-06) +### Features Added +* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) + + +## 0.15.0 (2021-05-05) +### Features Added +* Add support for null map and slice +* Export `Response.Payload` method + +### Breaking Changes +* remove `Response.UnmarshalError` as it's no longer required + + +## 0.14.5 (2021-04-23) +### Features Added +* Add `UnmarshalError()` on `azcore.Response` + + +## 0.14.4 (2021-04-22) +### Features Added +* Support for basic LRO polling +* Added type `LROPoller` and supporting types for basic polling on long running operations. +* rename poller param and added doc comment + +### Bugs Fixed +* Fixed content type detection bug in logging. + + +## 0.14.3 (2021-03-29) +### Features Added +* Add support for multi-part form data +* Added method `WriteMultipartFormData()` to Request. + + +## 0.14.2 (2021-03-17) +### Features Added +* Add support for encoding JSON null values +* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. +* Documentation fixes + +### Bugs Fixed +* Fixed improper error wrapping + + +## 0.14.1 (2021-02-08) +### Features Added +* Add `Pager` and `Poller` interfaces to azcore + + +## 0.14.0 (2021-01-12) +### Features Added +* Accept zero-value options for default values +* Specify zero-value options structs to accept default values. +* Remove `DefaultXxxOptions()` methods. 
+* Do not silently change TryTimeout on negative values +* make per-try timeout opt-in + + +## 0.13.4 (2020-11-20) +### Features Added +* Include telemetry string in User Agent + + +## 0.13.3 (2020-11-20) +### Features Added +* Updating response body handling on `azcore.Response` + + +## 0.13.2 (2020-11-13) +### Features Added +* Remove implementation of stateless policies as first-class functions. + + +## 0.13.1 (2020-11-05) +### Features Added +* Add `Telemetry()` method to `azcore.Request()` + + +## 0.13.0 (2020-10-14) +### Features Added +* Rename `log` to `logger` to avoid name collision with the log package. +* Documentation improvements +* Simplified `DefaultHTTPClientTransport()` implementation + + +## 0.12.1 (2020-10-13) +### Features Added +* Update `internal` module dependence to `v0.5.0` + + +## 0.12.0 (2020-10-08) +### Features Added +* Removed storage specific content +* Removed internal content to prevent API clutter +* Refactored various policy options to conform with our options pattern + + +## 0.11.0 (2020-09-22) +### Features Added + +* Removed `LogError` and `LogSlowResponse`. +* Renamed `options` in `RequestLogOptions`. +* Updated `NewRequestLogPolicy()` to follow standard pattern for options. +* Refactored `requestLogPolicy.Do()` per above changes. +* Cleaned up/added logging in retry policy. +* Export `NewResponseError()` +* Fix `RequestLogOptions` comment + + +## 0.10.1 (2020-09-17) +### Features Added +* Add default console logger +* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'. +* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks. +* Add `LogLongRunningOperation` + + +## 0.10.0 (2020-09-10) +### Features Added +* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library. +* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter. +* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`. +* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request. +* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this. +* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type. +* moved path concatenation into `JoinPaths()` func + + +## 0.9.6 (2020-08-18) +### Features Added +* Improvements to body download policy +* Always download the response body for error responses, i.e. HTTP status codes >= 400. +* Simplify variable declarations + + +## 0.9.5 (2020-08-11) +### Features Added +* Set the Content-Length header in `Request.SetBody` + + +## 0.9.4 (2020-08-03) +### Features Added +* Fix cancellation of per try timeout +* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time. +* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion. 
+* Do not drain response body if there are no more retries +* Do not retry non-idempotent operations when body download fails + + +## 0.9.3 (2020-07-28) +### Features Added +* Add support for custom HTTP request headers +* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request. +* Use `azcore.WithHTTPHeader` to add HTTP headers to a context. +* Remove method specific to Go 1.14 + + +## 0.9.2 (2020-07-28) +### Features Added +* Omit read-only content from request payloads +* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation. +* Verify no fields were dropped +* Handle embedded struct types +* Added test for cloning by value +* Add messages to failures + + +## 0.9.1 (2020-07-22) +### Features Added +* Updated dependency on internal module to fix race condition. + + +## 0.9.0 (2020-07-09) +### Features Added +* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure. +* Updated `sdk/internal` dependency to latest version. +* Rename package alias + + +## 0.8.2 (2020-06-29) +### Features Added +* Added missing documentation comments + +### Bugs Fixed +* Fixed a bug in body download policy. + + +## 0.8.1 (2020-06-26) +### Features Added +* Miscellaneous clean-up reported by linters + + +## 0.8.0 (2020-06-01) +### Features Added +* Differentiate between standard and URL encoding. + + +## 0.7.1 (2020-05-27) +### Features Added +* Add support for for base64 encoding and decoding of payloads. + + +## 0.7.0 (2020-05-12) +### Features Added +* Change `RetryAfter()` to a function. + + +## 0.6.0 (2020-04-29) +### Features Added +* Updating `RetryAfter` to only return the detaion in the RetryAfter header + + +## 0.5.0 (2020-03-23) +### Features Added +* Export `TransportFunc` + +### Breaking Changes +* Removed `IterationDone` + + +## 0.4.1 (2020-02-25) +### Features Added +* Ensure per-try timeout is properly cancelled +* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy. +* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed. +* `Logger.Should()` will return false if no listener is set. + + +## 0.4.0 (2020-02-18) +### Features Added +* Enable custom `RetryOptions` to be specified per API call +* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call. +* Remove 429 from the list of default HTTP status codes for retry. +* Change StatusCodesForRetry to a slice so consumers can append to it. +* Added support for retry-after in HTTP-date format. +* Cleaned up some comments specific to storage. +* Remove `Request.SetQueryParam()` +* Renamed `MaxTries` to `MaxRetries` + +## 0.3.0 (2020-01-16) +### Features Added +* Added `DefaultRetryOptions` to create initialized default options. 
+ +### Breaking Changes +* Removed `Response.CheckStatusCode()` + + +## 0.2.0 (2020-01-15) +### Features Added +* Add support for marshalling and unmarshalling JSON +* Removed `Response.Payload` field +* Exit early when unmarsahlling if there is no payload + + +## 0.1.0 (2020-01-10) +### Features Added +* Initial release diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt new file mode 100644 index 000000000..48ea6616b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md new file mode 100644 index 000000000..35a74e18d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md @@ -0,0 +1,39 @@ +# Azure Core Client Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main) + +The `azcore` module provides a set of common interfaces and types for Go SDK client modules. +These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html). + +## Getting started + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency. +To add the latest version to your `go.mod` file, execute the following command. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azcore +``` + +General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). + +## Contributing +This project welcomes contributions and suggestions. 
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml new file mode 100644 index 000000000..aab921853 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -0,0 +1,29 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go new file mode 100644 index 000000000..9d077a3e1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cloud + +var ( + // AzureChina contains configuration for Azure China. + AzureChina = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzureGovernment contains configuration for Azure Government. + AzureGovernment = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzurePublic contains configuration for Azure Public Cloud. + AzurePublic = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, + } +) + +// ServiceName identifies a cloud service. +type ServiceName string + +// ResourceManager is a global constant identifying Azure Resource Manager. +const ResourceManager ServiceName = "resourceManager" + +// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. +type ServiceConfiguration struct { + // Audience is the audience the client will request for its access tokens. + Audience string + // Endpoint is the service's base URL. + Endpoint string +} + +// Configuration configures a cloud. +type Configuration struct { + // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. + ActiveDirectoryAuthorityHost string + // Services contains configuration for the cloud's services. 
+ Services map[ServiceName]ServiceConfiguration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go new file mode 100644 index 000000000..985b1bde2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go @@ -0,0 +1,53 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/* +Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. + +Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as +"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other +Azure Clouds to configure clients appropriately. + +This package contains predefined configuration for well-known sovereign clouds such as Azure Government and +Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For +example, configuring a credential and ARM client for Azure Government: + + opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) + +Applications deployed to a private cloud such as Azure Stack create a Configuration object with +appropriate values: + + c := cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://...", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "...", + Endpoint: "https://...", + }, + }, + } + opts := azcore.ClientOptions{Cloud: c} + + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) +*/ +package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go new file mode 100644 index 000000000..9fae2a9dc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -0,0 +1,132 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// AccessToken represents an Azure service bearer access token with expiry information. +type AccessToken = exported.AccessToken + +// TokenCredential represents a credential capable of providing an OAuth token. +type TokenCredential = exported.TokenCredential + +// holds sentinel values used to send nulls +var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} + +// NullValue is used to send an explicit 'null' within a request. +// This is typically used in JSON-MERGE-PATCH operations to delete a value. 
+func NullValue[T any]() T { + t := shared.TypeOfT[T]() + v, found := nullables[t] + if !found { + var o reflect.Value + if k := t.Kind(); k == reflect.Map { + o = reflect.MakeMap(t) + } else if k == reflect.Slice { + // empty slices appear to all point to the same data block + // which causes comparisons to become ambiguous. so we create + // a slice with len/cap of one which ensures a unique address. + o = reflect.MakeSlice(t, 1, 1) + } else { + o = reflect.New(t.Elem()) + } + v = o.Interface() + nullables[t] = v + } + // return the sentinel object + return v.(T) +} + +// IsNullValue returns true if the field contains a null sentinel value. +// This is used by custom marshallers to properly encode a null value. +func IsNullValue[T any](v T) bool { + // see if our map has a sentinel object for this *T + t := reflect.TypeOf(v) + if o, found := nullables[t]; found { + o1 := reflect.ValueOf(o) + v1 := reflect.ValueOf(v) + // we found it; return true if v points to the sentinel object. + // NOTE: maps and slices can only be compared to nil, else you get + // a runtime panic. so we compare addresses instead. + return o1.Pointer() == v1.Pointer() + } + // no sentinel object for this *t + return false +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions = policy.ClientOptions + +// Client is a basic HTTP client. It consists of a pipeline and tracing provider. +type Client struct { + pl runtime.Pipeline + tr tracing.Tracer + + // cached on the client to support shallow copying with new values + tp tracing.Provider + modVer string +} + +// NewClient creates a new Client instance with the provided values. +// - clientName - the fully qualified name of the client ("module/package.Client"); this is used by the telemetry policy and tracing provider. +// if module and package are the same value, the "module/" prefix can be omitted. +// - moduleVersion - the semantic version of the containing module; used by the telemetry policy +// - plOpts - pipeline configuration options; can be the zero-value +// - options - optional client configurations; pass nil to accept the default values +func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { + mod, client, err := shared.ExtractModuleName(clientName) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + + if !options.Telemetry.Disabled { + if err := shared.ValidateModVer(moduleVersion); err != nil { + return nil, err + } + } + + pl := runtime.NewPipeline(mod, moduleVersion, plOpts, options) + + tr := options.TracingProvider.NewTracer(client, moduleVersion) + + return &Client{ + pl: pl, + tr: tr, + tp: options.TracingProvider, + modVer: moduleVersion, + }, nil +} + +// Pipeline returns the pipeline for this client. +func (c *Client) Pipeline() runtime.Pipeline { + return c.pl +} + +// Tracer returns the tracer for this client. +func (c *Client) Tracer() tracing.Tracer { + return c.tr +} + +// WithClientName returns a shallow copy of the Client with its tracing client name changed to clientName. +// Note that the values for module name and version will be preserved from the source Client. 
+// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans +func (c *Client) WithClientName(clientName string) *Client { + tr := c.tp.NewTracer(clientName, c.modVer) + return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go new file mode 100644 index 000000000..28c64678c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -0,0 +1,257 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. + +The middleware consists of three components. + + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. + +# Implementing the Policy Interface + +A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as +a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share +the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to +avoid race conditions. + +A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can +perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, +and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request +work, it must call the Next() method on the *policy.Request instance in order to pass the request to the +next Policy in the chain. + +When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance +can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response +body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response +and error instances to its caller. + +Template for implementing a stateless Policy: + + type policyFunc func(*policy.Request) (*http.Response, error) + + // Do implements the Policy interface on policyFunc. 
+ func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } + + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + }) + } + +Template for implementing a stateful Policy: + + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } + + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } + + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + } + +# Implementing the Transporter Interface + +The Transporter interface is responsible for sending the HTTP request and returning the corresponding +HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter +implementation uses a shared http.Client from the standard library. + +The same stateful/stateless rules for Policy implementations apply to Transporter implementations. + +# Using Policy and Transporter Instances Via a Pipeline + +To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. + + func NewPipeline(transport Transporter, policies ...Policy) Pipeline + +The specified Policy instances form a chain and are invoked in the order provided to NewPipeline +followed by the Transporter. + +Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + + func (p Pipeline) Do(req *Request) (*http.Request, error) + +The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter +instances. The response/error is then sent through the same chain of Policy instances in reverse +order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with +TransportA. + + pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) + +The flow of Request and Response looks like the following: + + policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ + | + HTTP(S) endpoint + | + caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ + +# Creating a Request Instance + +The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also +contains some internal state and provides various convenience methods. You create a Request instance +by calling the runtime.NewRequest function: + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + +If the Request should contain a body, call the SetBody method. + + func (req *Request) SetBody(body ReadSeekCloser, contentType string) error + +A seekable stream is required so that upon retry, the retry Policy instance can seek the stream +back to the beginning before retrying the network request and re-uploading the body. 
+ +# Sending an Explicit Null + +Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. + + { + "delete-me": null + } + +This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as +a means to resolve the ambiguity between a field to be excluded and its zero-value. + + type Widget struct { + Name *string `json:",omitempty"` + Count *int `json:",omitempty"` + } + +In the above example, Name and Count are defined as pointer-to-type to disambiguate between +a missing value (nil) and a zero-value (0) which might have semantic differences. + +In a PATCH operation, any fields left as nil are to have their values preserved. When updating +a Widget's count, one simply specifies the new value for Count, leaving Name nil. + +To fulfill the requirement for sending a JSON null, the NullValue() function can be used. + + w := Widget{ + Count: azcore.NullValue[*int](), + } + +This sends an explict "null" for Count, indicating that any current value for Count should be deleted. + +# Processing the Response + +When the HTTP response is received, the *http.Response is returned directly. Each Policy instance +can inspect/mutate the *http.Response. + +# Built-in Logging + +To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. + +By default the logger writes to stderr. This can be customized by calling log.SetListener, providing +a callback that writes to the desired location. Any custom logging implementation MUST provide its +own synchronization to handle concurrent invocations. + +See the docs for the log package for further details. + +# Pageable Operations + +Pageable operations return potentially large data sets spread over multiple GET requests. The result of +each GET is a "page" of data consisting of a slice of items. + +Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. + + func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] + +The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages +and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. + + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } + +# Long-Running Operations + +Long-running operations (LROs) are operations consisting of an initial request to start the operation followed +by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one +of the following values. + + - Succeeded - the LRO completed successfully + - Failed - the LRO failed to complete + - Canceled - the LRO was canceled + +LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. + + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + +When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. +It does _not_ mean that the widget has been created or updated (or failed to be created/updated). + +The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, +call the PollUntilDone() method. 
+ + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result + +The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the +context is canceled/timed out. + +Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to +this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation +mechanism as required. + +# Resume Tokens + +Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to +recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. + + token, err := poller.ResumeToken() + // handle error + +Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls +to poller.Poll() might change the poller's state. In this case, a new token should be created. + +After the token has been obtained, it can be used to recreate an instance of the originating poller. + + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) + +When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. + +Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO +BeginA() will result in an error. +*/ +package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go new file mode 100644 index 000000000..17bd50c67 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go new file mode 100644 index 000000000..23ea7e7c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -0,0 +1,48 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "strings" +) + +// ETag is a property used for optimistic concurrency during updates +// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2 +// An ETag can be empty (""). +type ETag string + +// ETagAny is an ETag that represents everything, the value is "*" +const ETagAny ETag = "*" + +// Equals does a strong comparison of two ETags. Equals returns true when both +// ETags are not weak and the values of the underlying strings are equal. +func (e ETag) Equals(other ETag) bool { + return !e.IsWeak() && !other.IsWeak() && e == other +} + +// WeakEquals does a weak comparison of two ETags. 
Two ETags are equivalent if their opaque-tags match +// character-by-character, regardless of either or both being tagged as "weak". +func (e ETag) WeakEquals(other ETag) bool { + getStart := func(e1 ETag) int { + if e1.IsWeak() { + return 2 + } + return 0 + } + aStart := getStart(e) + bStart := getStart(other) + + aVal := e[aStart:] + bVal := other[bStart:] + + return aVal == bVal +} + +// IsWeak specifies whether the ETag is strong or weak. +func (e ETag) IsWeak() bool { + return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go new file mode 100644 index 000000000..a1236b362 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "io" + "net/http" + "time" +) + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// Exported as streaming.NopCloser(). +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// AccessToken represents an Azure service bearer access token with expiry information. +// Exported as azcore.AccessToken. +type AccessToken struct { + Token string + ExpiresOn time.Time +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. +// Exported as policy.TokenRequestOptions. +type TokenRequestOptions struct { + // Scopes contains the list of permission scopes required for the token. + Scopes []string + + // TenantID identifies the tenant from which to request the token. azidentity credentials authenticate in + // their configured default tenants when this field isn't set. + TenantID string +} + +// TokenCredential represents a credential capable of providing an OAuth token. +// Exported as azcore.TokenCredential. +type TokenCredential interface { + // GetToken requests an access token for the specified set of scopes. + GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go new file mode 100644 index 000000000..c44efd6ef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go @@ -0,0 +1,97 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "fmt" + "net/http" + + "golang.org/x/net/http/httpguts" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. 
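An illustrative sketch (not part of the vendored file) of how the ETag helpers defined above behave; the tag values are hypothetical and the azcore import is assumed:

    strong := azcore.ETag(`"abc123"`)
    weak := azcore.ETag(`W/"abc123"`)

    _ = strong.Equals(weak)     // false: a strong comparison fails when either tag is weak
    _ = strong.WeakEquals(weak) // true: the opaque-tags match once the W/ prefix is ignored
    _ = weak.IsWeak()           // true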
+// Exported as policy.Policy. +type Policy interface { + // Do applies the policy to the specified Request. When implementing a Policy, mutate the + // request before calling req.Next() to move on to the next policy, and respond to the result + // before returning to the caller. + Do(req *Request) (*http.Response, error) +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +// Exported as runtime.Pipeline. +type Pipeline struct { + policies []Policy +} + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +// Exported as policy.Transporter. +type Transporter interface { + // Do sends the HTTP request and returns the HTTP response or error. + Do(req *http.Request) (*http.Response, error) +} + +// used to adapt a TransportPolicy to a Policy +type transportPolicy struct { + trans Transporter +} + +func (tp transportPolicy) Do(req *Request) (*http.Response, error) { + if tp.trans == nil { + return nil, errors.New("missing transporter") + } + resp, err := tp.trans.Do(req.Raw()) + if err != nil { + return nil, err + } else if resp == nil { + // there was no response and no error (rare but can happen) + // this ensures the retry policy will retry the request + return nil, errors.New("received nil response") + } + return resp, nil +} + +// NewPipeline creates a new Pipeline object from the specified Policies. +// Not directly exported, but used as part of runtime.NewPipeline(). +func NewPipeline(transport Transporter, policies ...Policy) Pipeline { + // transport policy must always be the last in the slice + policies = append(policies, transportPolicy{trans: transport}) + return Pipeline{ + policies: policies, + } +} + +// Do is called for each and every HTTP request. It passes the request through all +// the Policy objects (which can transform the Request's URL/query parameters/headers) +// and ultimately sends the transformed HTTP request over the network. +func (p Pipeline) Do(req *Request) (*http.Response, error) { + if req == nil { + return nil, errors.New("request cannot be nil") + } + // check copied from Transport.roundTrip() + for k, vv := range req.Raw().Header { + if !httpguts.ValidHeaderFieldName(k) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field value %q for key %v", v, k) + } + } + } + req.policies = p.policies + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go new file mode 100644 index 000000000..fa99d1b7e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -0,0 +1,182 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use NewRequest() instead. +// Exported as policy.Request. 
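To illustrate the extensibility point described above, a minimal sketch of a custom policy; the type name, header name, and value are hypothetical, and the policy and net/http imports are assumed:

    type exampleHeaderPolicy struct{}

    func (exampleHeaderPolicy) Do(req *policy.Request) (*http.Response, error) {
        // mutate the outgoing request, then hand off to the next policy in the pipeline
        req.Raw().Header.Set("x-example-trace-id", "abc123") // hypothetical header
        return req.Next()
    }

Such a policy would typically be injected through the PerCallPolicies or PerRetryPolicies fields of policy.ClientOptions, defined later in this patch.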
+type Request struct { + req *http.Request + body io.ReadSeekCloser + policies []Policy + values opValues +} + +type opValues map[reflect.Type]interface{} + +// Set adds/changes a value +func (ov opValues) set(value interface{}) { + ov[reflect.TypeOf(value)] = value +} + +// Get looks for a value set by SetValue first +func (ov opValues) get(value interface{}) bool { + v, ok := ov[reflect.ValueOf(value).Elem().Type()] + if ok { + reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) + } + return ok +} + +// NewRequest creates a new Request with the specified input. +// Exported as runtime.NewRequest(). +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { + req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil) + if err != nil { + return nil, err + } + if req.URL.Host == "" { + return nil, errors.New("no Host in request URL") + } + if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { + return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) + } + return &Request{req: req}, nil +} + +// Body returns the original body specified when the Request was created. +func (req *Request) Body() io.ReadSeekCloser { + return req.body +} + +// Raw returns the underlying HTTP request. +func (req *Request) Raw() *http.Request { + return req.req +} + +// Next calls the next policy in the pipeline. +// If there are no more policies, nil and an error are returned. +// This method is intended to be called from pipeline policies. +// To send a request through a pipeline call Pipeline.Do(). +func (req *Request) Next() (*http.Response, error) { + if len(req.policies) == 0 { + return nil, errors.New("no more policies") + } + nextPolicy := req.policies[0] + nextReq := *req + nextReq.policies = nextReq.policies[1:] + return nextPolicy.Do(&nextReq) +} + +// SetOperationValue adds/changes a mutable key/value associated with a single operation. +func (req *Request) SetOperationValue(value interface{}) { + if req.values == nil { + req.values = opValues{} + } + req.values.set(value) +} + +// OperationValue looks for a value set by SetOperationValue(). +func (req *Request) OperationValue(value interface{}) bool { + if req.values == nil { + return false + } + return req.values.get(value) +} + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length +// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", +// Content-Type won't be set. +// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. +func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { + var err error + var size int64 + if body != nil { + size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + } + if size == 0 { + // treat an empty stream the same as a nil one: assign req a nil body + body = nil + // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content + // (Del is a no-op when the header has no value) + req.req.Header.Del(shared.HeaderContentLength) + } else { + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + } + // keep a copy of the body argument. 
this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. + req.body = body + req.req.Body = body + req.req.ContentLength = size + if contentType == "" { + // Del is a no-op when the header has no value + req.req.Header.Del(shared.HeaderContentType) + } else { + req.req.Header.Set(shared.HeaderContentType, contentType) + } + return nil +} + +// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. +func (req *Request) RewindBody() error { + if req.body != nil { + // Reset the stream back to the beginning and restore the body + _, err := req.body.Seek(0, io.SeekStart) + req.req.Body = req.body + return err + } + return nil +} + +// Close closes the request body. +func (req *Request) Close() error { + if req.body == nil { + return nil + } + return req.body.Close() +} + +// Clone returns a deep copy of the request with its context changed to ctx. +func (req *Request) Clone(ctx context.Context) *Request { + r2 := *req + r2.req = req.req.Clone(ctx) + return &r2 +} + +// not exported but dependent on Request + +// PolicyFunc is a type that implements the Policy interface. +// Use this type when implementing a stateless policy as a first-class function. +type PolicyFunc func(*Request) (*http.Response, error) + +// Do implements the Policy interface on policyFunc. +func (pf PolicyFunc) Do(req *Request) (*http.Response, error) { + return pf(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go new file mode 100644 index 000000000..7df2f88c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -0,0 +1,144 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "regexp" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// NewResponseError creates a new *ResponseError from the provided HTTP response. +// Exported as runtime.NewResponseError(). +func NewResponseError(resp *http.Response) error { + respErr := &ResponseError{ + StatusCode: resp.StatusCode, + RawResponse: resp, + } + + // prefer the error code in the response header + if ec := resp.Header.Get("x-ms-error-code"); ec != "" { + respErr.ErrorCode = ec + return respErr + } + + // if we didn't get x-ms-error-code, check in the response body + body, err := exported.Payload(resp, nil) + if err != nil { + return err + } + + if len(body) > 0 { + if code := extractErrorCodeJSON(body); code != "" { + respErr.ErrorCode = code + } else if code := extractErrorCodeXML(body); code != "" { + respErr.ErrorCode = code + } + } + + return respErr +} + +func extractErrorCodeJSON(body []byte) string { + var rawObj map[string]interface{} + if err := json.Unmarshal(body, &rawObj); err != nil { + // not a JSON object + return "" + } + + // check if this is a wrapped error, i.e. { "error": { ... } } + // if so then unwrap it + if wrapped, ok := rawObj["error"]; ok { + unwrapped, ok := wrapped.(map[string]interface{}) + if !ok { + return "" + } + rawObj = unwrapped + } else if wrapped, ok := rawObj["odata.error"]; ok { + // check if this a wrapped odata error, i.e. { "odata.error": { ... 
} } + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } + + // now check for the error code + code, ok := rawObj["code"] + if !ok { + return "" + } + codeStr, ok := code.(string) + if !ok { + return "" + } + return codeStr +} + +func extractErrorCodeXML(body []byte) string { + // regular expression is much easier than dealing with the XML parser + rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`) + res := rx.FindStringSubmatch(string(body)) + if len(res) != 2 { + return "" + } + // first submatch is the entire thing, second one is the captured error code + return res[1] +} + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +// Exported as azcore.ResponseError. +type ResponseError struct { + // ErrorCode is the error code returned by the resource provider if available. + ErrorCode string + + // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants. + StatusCode int + + // RawResponse is the underlying HTTP response. + RawResponse *http.Response +} + +// Error implements the error interface for type ResponseError. +// Note that the message contents are not contractual and can change over time. +func (e *ResponseError) Error() string { + // write the request method and URL with response status code + msg := &bytes.Buffer{} + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.ErrorCode != "" { + fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) + } else { + fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := exported.Payload(e.RawResponse, nil) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + + return msg.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go new file mode 100644 index 000000000..0684cb317 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This is an internal helper package to combine the complete logging APIs. 
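A minimal sketch of consuming the error type above from client code, assuming a hypothetical client and operation plus the standard context, errors, and net/http imports:

    var respErr *azcore.ResponseError
    _, err := client.GetWidget(context.TODO(), "id") // hypothetical operation
    if errors.As(err, &respErr) {
        if respErr.StatusCode == http.StatusNotFound {
            // the resource doesn't exist; not necessarily fatal
        }
        // inspect respErr.ErrorCode for the service-specific error code
    }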
+package log + +import ( + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Event = log.Event + +const ( + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO +) + +func Write(cls log.Event, msg string) { + log.Write(cls, msg) +} + +func Writef(cls log.Event, format string, a ...interface{}) { + log.Writef(cls, format, a...) +} + +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +func Should(cls log.Event) bool { + return log.Should(cls) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go new file mode 100644 index 000000000..b05bd8b38 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -0,0 +1,159 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package async + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md + +// Applicable returns true if the LRO is using Azure-AsyncOperation. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderAzureAsync) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["asyncURL"] + return ok +} + +// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The URL from Azure-AsyncOperation header. + AsyncURL string `json:"asyncURL"` + + // The URL from Location header. + LocURL string `json:"locURL"` + + // The URL from the initial LRO request. + OrigURL string `json:"origURL"` + + // The HTTP method from the initial LRO request. + Method string `json:"method"` + + // The value of final-state-via from swagger, can be the empty string. + FinalState pollers.FinalStateVia `json:"finalState"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response and final-state type. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") + asyncURL := resp.Header.Get(shared.HeaderAzureAsync) + if asyncURL == "" { + return nil, errors.New("response is missing Azure-AsyncOperation header") + } + if !poller.IsValidURL(asyncURL) { + return nil, fmt.Errorf("invalid polling URL %s", asyncURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. 
+ state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + p := &Poller[T]{ + pl: pl, + resp: resp, + AsyncURL: asyncURL, + LocURL: resp.Header.Get(shared.HeaderLocation), + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: state, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +// Poll retrieves the current state of the LRO. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.CurState) { + return exported.NewResponseError(p.resp) + } + var req *exported.Request + var err error + if p.Method == http.MethodPatch || p.Method == http.MethodPut { + // for PATCH and PUT, the final GET is on the original resource URL + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost { + if p.FinalState == pollers.FinalStateViaAzureAsyncOp { + // no final GET required + } else if p.FinalState == pollers.FinalStateViaOriginalURI { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.LocURL != "" { + // ideally FinalState would be set to "location" but it isn't always. + // must check last due to more permissive condition. + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go new file mode 100644 index 000000000..2bb9e105b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -0,0 +1,135 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package body + +import ( + "context" + "errors" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "body" + +// Applicable returns true if the LRO is using no headers, just provisioning state. +// This is only applicable to PATCH and PUT methods and assumes no polling headers. 
+func Applicable(resp *http.Response) bool { + // we can't check for absense of headers due to some misbehaving services + // like redis that return a Location header but don't actually use that protocol + return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Body pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The poller's type, used for resume token processing. + Type string `json:"type"` + + // The URL for polling. + PollURL string `json:"pollURL"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Body poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Body poller.") + p := &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: resp.Request.URL.String(), + } + // default initial state to InProgress. depending on the HTTP + // status code and provisioning state, we might change the value. + curState := poller.StatusInProgress + provState, err := poller.GetProvisioningState(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if resp.StatusCode == http.StatusCreated && provState != "" { + // absense of provisioning state is ok for a 201, means the operation is in progress + curState = provState + } else if resp.StatusCode == http.StatusOK { + if provState != "" { + curState = provState + } else if provState == "" { + // for a 200, absense of provisioning state indicates success + curState = poller.StatusSucceeded + } + } else if resp.StatusCode == http.StatusNoContent { + curState = poller.StatusSucceeded + } + p.CurState = curState + return p, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + if resp.StatusCode == http.StatusNoContent { + p.resp = resp + p.CurState = poller.StatusSucceeded + return p.CurState, nil + } + state, err := poller.GetProvisioningState(resp) + if errors.Is(err, poller.ErrNoBody) { + // a missing response body in non-204 case is an error + return "", err + } else if state == "" { + // a response body without provisioning state is considered terminal success + state = poller.StatusSucceeded + } else if err != nil { + return "", err + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go new file mode 100644 index 000000000..d6be89876 --- 
/dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package loc + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "loc" + +// Applicable returns true if the LRO is using Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + Type string `json:"type"` + PollURL string `json:"pollURL"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Location poller.") + locURL := resp.Header.Get(shared.HeaderLocation) + if locURL == "" { + return nil, errors.New("response is missing Location header") + } + if !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid polling URL %s", locURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + return &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: locURL, + CurState: state, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + // location polling can return an updated polling URL + if h := resp.Header.Get(shared.HeaderLocation); h != "" { + p.PollURL = h + } + // if provisioning state is available, use that. this is only + // for some ARM LRO scenarios (e.g. DELETE with a Location header) + // so if it's missing then use HTTP status code. 
+ provState, _ := poller.GetProvisioningState(resp) + p.resp = resp + if provState != "" { + p.CurState = provState + } else if resp.StatusCode == http.StatusAccepted { + p.CurState = poller.StatusInProgress + } else if resp.StatusCode > 199 && resp.StatusCode < 300 { + // any 2xx other than a 202 indicates success + p.CurState = poller.StatusSucceeded + } else { + p.CurState = poller.StatusFailed + } + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go new file mode 100644 index 000000000..1bc7ad0ac --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -0,0 +1,145 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package op + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is using Operation-Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderOperationLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["oplocURL"] + return ok +} + +// Poller is an LRO poller that uses the Operation-Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + OpLocURL string `json:"oplocURL"` + LocURL string `json:"locURL"` + OrigURL string `json:"origURL"` + Method string `json:"method"` + FinalState pollers.FinalStateVia `json:"finalState"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Operation-Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Operation-Location poller.") + opURL := resp.Header.Get(shared.HeaderOperationLocation) + if opURL == "" { + return nil, errors.New("response is missing Operation-Location header") + } + if !poller.IsValidURL(opURL) { + return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) + } + locURL := resp.Header.Get(shared.HeaderLocation) + // Location header is optional + if locURL != "" && !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid Location URL %s", locURL) + } + // default initial state to InProgress. if the + // service sent us a status then use that instead. 
+ curState := poller.StatusInProgress + status, err := poller.GetStatus(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if status != "" { + curState = status + } + + return &Poller[T]{ + pl: pl, + resp: resp, + OpLocURL: opURL, + LocURL: locURL, + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: curState, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + var req *exported.Request + var err error + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { + // no final GET required, terminal response should have it + } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { + return rlErr + } else if rl != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, rl) + } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go new file mode 100644 index 000000000..37ed647f4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go @@ -0,0 +1,24 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia string + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation" + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation FinalStateVia = "location" + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI FinalStateVia = "original-uri" + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. 
+ FinalStateViaOpLocation FinalStateVia = "operation-location" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go new file mode 100644 index 000000000..d8d86a46c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -0,0 +1,187 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// getTokenTypeName creates a type name from the type parameter T. +func getTokenTypeName[T any]() (string, error) { + tt := shared.TypeOfT[T]() + var n string + if tt.Kind() == reflect.Pointer { + n = "*" + tt = tt.Elem() + } + n += tt.Name() + if n == "" { + return "", errors.New("nameless types are not allowed") + } + return n, nil +} + +type resumeTokenWrapper[T any] struct { + Type string `json:"type"` + Token T `json:"token"` +} + +// NewResumeToken creates a resume token from the specified type. +// An error is returned if the generic type has no name (e.g. struct{}). +func NewResumeToken[TResult, TSource any](from TSource) (string, error) { + n, err := getTokenTypeName[TResult]() + if err != nil { + return "", err + } + b, err := json.Marshal(resumeTokenWrapper[TSource]{ + Type: n, + Token: from, + }) + if err != nil { + return "", err + } + return string(b), nil +} + +// ExtractToken returns the poller-specific token information from the provided token value. +func ExtractToken(token string) ([]byte, error) { + raw := map[string]json.RawMessage{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return nil, err + } + // this is dependent on the type resumeTokenWrapper[T] + tk, ok := raw["token"] + if !ok { + return nil, errors.New("missing token value") + } + return tk, nil +} + +// IsTokenValid returns an error if the specified token isn't applicable for generic type T. +func IsTokenValid[T any](token string) error { + raw := map[string]interface{}{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return err + } + t, ok := raw["type"] + if !ok { + return errors.New("missing type value") + } + tt, ok := t.(string) + if !ok { + return fmt.Errorf("invalid type format %T", t) + } + n, err := getTokenTypeName[T]() + if err != nil { + return err + } + if tt != n { + return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n) + } + return nil +} + +// used if the operation synchronously completed +type NopPoller[T any] struct { + resp *http.Response + result T +} + +// NewNopPoller creates a NopPoller from the provided response. +// It unmarshals the response body into an instance of T. 
+func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) { + np := &NopPoller[T]{resp: resp} + if resp.StatusCode == http.StatusNoContent { + return np, nil + } + payload, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(payload) == 0 { + return np, nil + } + if err = json.Unmarshal(payload, &np.result); err != nil { + return nil, err + } + return np, nil +} + +func (*NopPoller[T]) Done() bool { + return true +} + +func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) { + return p.resp, nil +} + +func (p *NopPoller[T]) Result(ctx context.Context, out *T) error { + *out = p.result + return nil +} + +// PollHelper creates and executes the request, calling update() with the response. +// If the request fails, the update func is not called. +// The update func returns the state of the operation for logging purposes or an error +// if it fails to extract the required state from the response. +func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error { + req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint) + if err != nil { + return err + } + resp, err := pl.Do(req) + if err != nil { + return err + } + state, err := update(resp) + if err != nil { + return err + } + log.Writef(log.EventLRO, "State %s", state) + return nil +} + +// ResultHelper processes the response as success or failure. +// In the success case, it unmarshals the payload into either a new instance of T or out. +// In the failure case, it creates an *azcore.Response error from the response. +func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { + // short-circuit the simple success case with no response body to unmarshal + if resp.StatusCode == http.StatusNoContent { + return nil + } + + defer resp.Body.Close() + if !poller.StatusCodeValid(resp) || failed { + // the LRO failed. unmarshall the error and update state + return azexported.NewResponseError(resp) + } + + // success case + payload, err := exported.Payload(resp, nil) + if err != nil { + return err + } + if len(payload) == 0 { + return nil + } + + if err = json.Unmarshal(payload, out); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go new file mode 100644 index 000000000..53c8d353a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +const ( + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + HeaderAzureAsync = "Azure-AsyncOperation" + HeaderContentLength = "Content-Length" + HeaderContentType = "Content-Type" + HeaderLocation = "Location" + HeaderOperationLocation = "Operation-Location" + HeaderRetryAfter = "Retry-After" + HeaderUserAgent = "User-Agent" + HeaderWWWAuthenticate = "WWW-Authenticate" + HeaderXMSClientRequestID = "x-ms-client-request-id" +) + +const BearerTokenPrefix = "Bearer " + +const ( + // Module is the name of the calling module used in telemetry data. 
+ Module = "azcore" + + // Version is the semantic version (see http://semver.org) of this module. + Version = "v1.7.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go new file mode 100644 index 000000000..db0aaa7cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -0,0 +1,103 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +import ( + "context" + "fmt" + "net/http" + "reflect" + "regexp" + "strconv" + "time" +) + +// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. +type CtxWithHTTPHeaderKey struct{} + +// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. +type CtxWithRetryOptionsKey struct{} + +// CtxIncludeResponseKey is used as a context key for retrieving the raw response. +type CtxIncludeResponseKey struct{} + +// Delay waits for the duration to elapse or the context to be cancelled. +func Delay(ctx context.Context, delay time.Duration) error { + select { + case <-time.After(delay): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// RetryAfter returns non-zero if the response contains a Retry-After header value. +func RetryAfter(resp *http.Response) time.Duration { + if resp == nil { + return 0 + } + ra := resp.Header.Get(HeaderRetryAfter) + if ra == "" { + return 0 + } + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + return time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + return time.Until(t) + } + return 0 +} + +// TypeOfT returns the type of the generic type param. +func TypeOfT[T any]() reflect.Type { + // you can't, at present, obtain the type of + // a type parameter, so this is the trick + return reflect.TypeOf((*T)(nil)).Elem() +} + +// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. +type TransportFunc func(*http.Request) (*http.Response, error) + +// Do implements the Transporter interface for the TransportFunc type. +func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { + return pf(req) +} + +// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string. +func ValidateModVer(moduleVersion string) error { + modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`) + if !modVerRegx.MatchString(moduleVersion) { + return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion) + } + return nil +} + +// ExtractModuleName returns "module", "package.Client" from "module/package.Client" or +// "package", "package.Client" from "package.Client" when there's no "module/" prefix. +// If clientName is malformed, an error is returned. 
+func ExtractModuleName(clientName string) (string, string, error) { + // uses unnamed capturing for "module", "package.Client", and "package" + regex, err := regexp.Compile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`) + if err != nil { + return "", "", err + } + + matches := regex.FindStringSubmatch(clientName) + if len(matches) < 4 { + return "", "", fmt.Errorf("malformed clientName %s", clientName) + } + + // the first match is the entire string, the second is "module", the third is + // "package.Client" and the fourth is "package". + // if there was no "module/" prefix, the second match will be the empty string + if matches[1] != "" { + return matches[1], matches[2], nil + } + return matches[3], matches[2], nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go new file mode 100644 index 000000000..2f3901bff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package log contains functionality for configuring logging behavior. +// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all". +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go new file mode 100644 index 000000000..7bde29d0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go @@ -0,0 +1,50 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package log provides functionality for configuring logging facilities. +package log + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// Event is used to group entries. Each group can be toggled on or off. +type Event = log.Event + +const ( + // EventRequest entries contain information about HTTP requests. + // This includes information like the URL, query parameters, and headers. + EventRequest Event = "Request" + + // EventResponse entries contain information about HTTP responses. + // This includes information like the HTTP status code, headers, and request URL. + EventResponse Event = "Response" + + // EventRetryPolicy entries contain information specific to the retry policy in use. + EventRetryPolicy Event = "Retry" + + // EventLRO entries contain information specific to long-running operations. + // This includes information like polling location, operation state, and sleep intervals. + EventLRO Event = "LongRunningOperation" +) + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +// NOTE: this is not goroutine safe and should be called before using SDK clients. +func SetEvents(cls ...Event) { + log.SetEvents(cls...) +} + +// SetListener will set the Logger to write to the specified Listener. +// NOTE: this is not goroutine safe and should be called before using SDK clients. 
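A short sketch of configuring the logging facilities above from application code; the listener destination is arbitrary and the azlog alias refers to this package:

    import (
        "fmt"

        azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
    )

    func init() {
        // limit output to retry and long-running-operation events
        azlog.SetEvents(azlog.EventRetryPolicy, azlog.EventLRO)
        // route log messages somewhere other than the default stderr writer
        azlog.SetListener(func(e azlog.Event, msg string) {
            fmt.Printf("[%s] %s\n", e, msg)
        })
    }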
+func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +// for testing purposes +func resetEvents() { + log.TestResetEvents() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go new file mode 100644 index 000000000..fad2579ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package policy contains the definitions needed for configuring in-box pipeline policies +// and creating custom policies. +package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go new file mode 100644 index 000000000..b20004783 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +type Policy = exported.Policy + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +type Transporter = exported.Transporter + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use runtime.NewRequest() instead. +type Request = exported.Request + +// ClientOptions contains optional settings for a client's pipeline. +// All zero-value fields will be initialized with default values. +type ClientOptions struct { + // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion string + + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. + Cloud cloud.Configuration + + // Logging configures the built-in logging policy. + Logging LogOptions + + // Retry configures the built-in retry policy. + Retry RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry TelemetryOptions + + // TracingProvider configures the tracing provider. + // It defaults to a no-op tracer. + TracingProvider tracing.Provider + + // Transport sets the transport for HTTP requests. + Transport Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCallPolicies []Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetryPolicies []Policy +} + +// LogOptions configures the logging policy's behavior. +type LogOptions struct { + // IncludeBody indicates if request and response bodies should be included in logging. + // The default value is false. + // NOTE: enabling this can lead to disclosure of sensitive information, use with care. 
+ IncludeBody bool + + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParams is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParams []string +} + +// RetryOptions configures the retry policy's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. + // The default value is three. A value less than zero means one try and no retries. + MaxRetries int32 + + // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. + // This is disabled by default. Specify a value greater than zero to enable. + // NOTE: Setting this to a small value might cause premature HTTP request time-outs. + TryTimeout time.Duration + + // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The value is used only if the HTTP response does not contain a Retry-After header. + // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. + // The default value is four seconds. A value less than zero means no delay between retries. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. + // Typically the value is greater than or equal to the value specified in RetryDelay. + // The default Value is 60 seconds. A value less than zero means there is no cap. + MaxRetryDelay time.Duration + + // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. + // A nil slice will use the following values. + // http.StatusRequestTimeout 408 + // http.StatusTooManyRequests 429 + // http.StatusInternalServerError 500 + // http.StatusBadGateway 502 + // http.StatusServiceUnavailable 503 + // http.StatusGatewayTimeout 504 + // Specifying values will replace the default values. + // Specifying an empty slice will disable retries for HTTP status codes. + StatusCodes []int + + // ShouldRetry evaluates if the retry policy should retry the request. + // When specified, the function overrides comparison against the list of + // HTTP status codes and error checking within the retry policy. Context + // and NonRetriable errors remain evaluated before calling ShouldRetry. + // The *http.Response and error parameters are mutually exclusive, i.e. + // if one is nil, the other is not nil. + // A return value of true means the retry policy should retry. + ShouldRetry func(*http.Response, error) bool +} + +// TelemetryOptions configures the telemetry policy's behavior. +type TelemetryOptions struct { + // ApplicationID is an application-specific identification string to add to the User-Agent. + // It has a maximum length of 24 characters and must not contain any spaces. + ApplicationID string + + // Disabled will prevent the addition of any telemetry data to the User-Agent. + Disabled bool +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. 
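A sketch of how the options above compose when configuring a client; the field values are illustrative only, concrete clients typically embed ClientOptions in their own options type, and the time import is assumed:

    opts := policy.ClientOptions{
        Retry: policy.RetryOptions{
            MaxRetries:    5,
            RetryDelay:    2 * time.Second,
            MaxRetryDelay: 30 * time.Second,
        },
        Logging: policy.LogOptions{
            // headers not listed here are logged with REDACTED values
            AllowedHeaders: []string{"x-example-correlation-id"}, // hypothetical header
        },
        Telemetry: policy.TelemetryOptions{
            ApplicationID: "exampleapp/1.0.0", // 24 characters max, no spaces
        },
    }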
+type TokenRequestOptions = exported.TokenRequestOptions + +// BearerTokenOptions configures the bearer token policy's behavior. +type BearerTokenOptions struct { + // AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request. + // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from + // its given credential. + AuthorizationHandler AuthorizationHandler +} + +// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. +type AuthorizationHandler struct { + // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token + // from the policy's given credential. Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't + // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a + // token from its credential according to its configuration. + OnRequest func(*Request, func(TokenRequestOptions) error) error + + // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the + // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible + // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's + // given credential. Implementations that need to perform I/O should use the Request's context, available from + // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, + // the policy will return any 401 response to the client. + OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go new file mode 100644 index 000000000..c9cfa438c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package runtime contains various facilities for creating requests and handling responses. +// The content is intended for SDK authors. +package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go new file mode 100644 index 000000000..6d03b291e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. +// Call this when a service request returns a non-successful status code. 
+func NewResponseError(resp *http.Response) error { + return exported.NewResponseError(resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go new file mode 100644 index 000000000..5507665d6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" +) + +// PagingHandler contains the required data for constructing a Pager. +type PagingHandler[T any] struct { + // More returns a boolean indicating if there are more pages to fetch. + // It uses the provided page to make the determination. + More func(T) bool + + // Fetcher fetches the first and subsequent pages. + Fetcher func(context.Context, *T) (T, error) +} + +// Pager provides operations for iterating over paged responses. +type Pager[T any] struct { + current *T + handler PagingHandler[T] + firstPage bool +} + +// NewPager creates an instance of Pager using the specified PagingHandler. +// Pass a non-nil T for firstPage if the first page has already been retrieved. +func NewPager[T any](handler PagingHandler[T]) *Pager[T] { + return &Pager[T]{ + handler: handler, + firstPage: true, + } +} + +// More returns true if there are more pages to retrieve. +func (p *Pager[T]) More() bool { + if p.current != nil { + return p.handler.More(*p.current) + } + return true +} + +// NextPage advances the pager to the next page. +func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { + var resp T + var err error + if p.current != nil { + if p.firstPage { + // we get here if it's an LRO-pager, we already have the first page + p.firstPage = false + return *p.current, nil + } else if !p.handler.More(*p.current) { + return *new(T), errors.New("no more pages") + } + resp, err = p.handler.Fetcher(ctx, p.current) + } else { + // non-LRO case, first page + p.firstPage = false + resp, err = p.handler.Fetcher(ctx, nil) + } + if err != nil { + return *new(T), err + } + p.current = &resp + return *p.current, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. +func (p *Pager[T]) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.current) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go new file mode 100644 index 000000000..9d9288f53 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -0,0 +1,66 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// PipelineOptions contains Pipeline options for SDK developers +type PipelineOptions struct { + AllowedHeaders, AllowedQueryParameters []string + APIVersion APIVersionOptions + PerCall, PerRetry []policy.Policy +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +type Pipeline = exported.Pipeline + +// NewPipeline creates a pipeline from connection options, with any additional policies as specified. 
+// Policies from ClientOptions are placed after policies from PipelineOptions. +// The module and version parameters are used by the telemetry policy, when enabled. +func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { + cp := policy.ClientOptions{} + if options != nil { + cp = *options + } + if len(plOpts.AllowedHeaders) > 0 { + headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + copy(headers, plOpts.AllowedHeaders) + headers = append(headers, cp.Logging.AllowedHeaders...) + cp.Logging.AllowedHeaders = headers + } + if len(plOpts.AllowedQueryParameters) > 0 { + qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + copy(qp, plOpts.AllowedQueryParameters) + qp = append(qp, cp.Logging.AllowedQueryParams...) + cp.Logging.AllowedQueryParams = qp + } + // we put the includeResponsePolicy at the very beginning so that the raw response + // is populated with the final response (some policies might mutate the response) + policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)} + if cp.APIVersion != "" { + policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) + } + if !cp.Telemetry.Disabled { + policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) + } + policies = append(policies, plOpts.PerCall...) + policies = append(policies, cp.PerCallPolicies...) + policies = append(policies, NewRetryPolicy(&cp.Retry)) + policies = append(policies, plOpts.PerRetry...) + policies = append(policies, cp.PerRetryPolicies...) + policies = append(policies, NewLogPolicy(&cp.Logging)) + policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy)) + transport := cp.Transport + if transport == nil { + transport = defaultHTTPClient + } + return exported.NewPipeline(transport, policies...) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go new file mode 100644 index 000000000..e5309aa6c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// APIVersionOptions contains options for API versions +type APIVersionOptions struct { + // Location indicates where to set the version on a request, for example in a header or query param + Location APIVersionLocation + // Name is the name of the header or query parameter, for example "api-version" + Name string +} + +// APIVersionLocation indicates which part of a request identifies the service version +type APIVersionLocation int + +const ( + // APIVersionLocationQueryParam indicates a query parameter + APIVersionLocationQueryParam = 0 + // APIVersionLocationHeader indicates a header + APIVersionLocationHeader = 1 +) + +// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version +// isn't empty and opts.Name is empty, Do will return an error. 
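// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored azcore sources): how an SDK
// client constructor might call NewPipeline, combining its own PipelineOptions
// (including the api-version override described above) with caller-supplied
// ClientOptions. Module name, version, and header/query names are examples.

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newExamplePipeline builds the pipeline a client would use for every request:
// module/version feed the telemetry policy, PipelineOptions declares which
// header and query values may appear unredacted in logs and how the
// api-version can be overridden, and clientOpts layers the caller's settings
// on top (nil is accepted and means "all defaults").
func newExamplePipeline(clientOpts *policy.ClientOptions) runtime.Pipeline {
	plOpts := runtime.PipelineOptions{
		AllowedHeaders:         []string{"x-example-resource-id"},
		AllowedQueryParameters: []string{"timeout"},
		APIVersion: runtime.APIVersionOptions{
			Name:     "api-version",
			Location: runtime.APIVersionLocationQueryParam,
		},
	}
	return runtime.NewPipeline("example.module", "v0.1.0", plOpts, clientOpts)
}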
+func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { + if opts == nil { + opts = &APIVersionOptions{} + } + return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} +} + +// apiVersionPolicy enables users to set the API version of every request a client sends. +type apiVersionPolicy struct { + // location indicates whether "name" refers to a query parameter or header. + location APIVersionLocation + + // name of the query param or header whose value should be overridden; provided by the client. + name string + + // version is the value (provided by the user) that replaces the default version value. + version string +} + +// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. +func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { + if a.version != "" { + if a.name == "" { + // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions + return nil, errors.New("this client doesn't support overriding its API version") + } + switch a.location { + case APIVersionLocationHeader: + req.Raw().Header.Set(a.name, a.version) + case APIVersionLocationQueryParam: + q := req.Raw().URL.Query() + q.Set(a.name, a.version) + req.Raw().URL.RawQuery = q.Encode() + default: + return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go new file mode 100644 index 000000000..b61e4c121 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -0,0 +1,116 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +type BearerTokenPolicy struct { + // mainResource is the resource to be retreived using the tenant specified in the credential + mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] + // the following fields are read-only + authzHandler policy.AuthorizationHandler + cred exported.TokenCredential + scopes []string +} + +type acquiringResourceState struct { + req *policy.Request + p *BearerTokenPolicy + tro policy.TokenRequestOptions +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.req.Raw().Context(), state.tro) + if err != nil { + return exported.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// scopes: the list of permission scopes required for the token. +// opts: optional settings. 
Pass nil to accept default values; this is the same as passing a zero-value options. +func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &policy.BearerTokenOptions{} + } + return &BearerTokenPolicy{ + authzHandler: opts.AuthorizationHandler, + cred: cred, + scopes: scopes, + mainResource: temporal.NewResource(acquire), + } +} + +// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential +func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { + return func(tro policy.TokenRequestOptions) error { + as := acquiringResourceState{p: b, req: req, tro: tro} + tk, err := b.mainResource.Get(as) + if err != nil { + return err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return nil + } +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + var err error + if b.authzHandler.OnRequest != nil { + err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) + } else { + err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) + } + if err != nil { + return nil, ensureNonRetriable(err) + } + + res, err := req.Next() + if err != nil { + return nil, err + } + + if res.StatusCode == http.StatusUnauthorized { + b.mainResource.Expire() + if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + res, err = req.Next() + } + } + } + return res, ensureNonRetriable(err) +} + +func ensureNonRetriable(err error) error { + var nre errorinfo.NonRetriable + if err != nil && !errors.As(err, &nre) { + err = btpError{err} + } + return err +} + +// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize +type btpError struct { + error +} + +func (btpError) NonRetriable() {} + +var _ errorinfo.NonRetriable = (*btpError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go new file mode 100644 index 000000000..99dc029f0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. 
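// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored azcore sources): one way to
// wire the bearer token policy into a pipeline. It assumes a credential from
// the azidentity module vendored elsewhere in this PR; the scope and module
// name are example values only.

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newAuthorizedPipeline acquires a credential, creates a BearerTokenPolicy for
// the scopes the service requires, and registers it as a per-retry policy so
// every try of every request carries an Authorization header.
func newAuthorizedPipeline() (runtime.Pipeline, error) {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return runtime.Pipeline{}, err
	}
	btp := runtime.NewBearerTokenPolicy(cred, []string{"https://management.azure.com/.default"}, nil)
	opts := &policy.ClientOptions{
		PerRetryPolicies: []policy.Policy{btp},
	}
	return runtime.NewPipeline("example.module", "v0.1.0", runtime.PipelineOptions{}, opts), nil
}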
+func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if err != nil { + return resp, err + } + var opValues bodyDownloadPolicyOpValues + // don't skip downloading error response bodies + if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { + return resp, err + } + // Either bodyDownloadPolicyOpValues was not specified (so skip is false) + // or it was specified and skip is false: don't skip downloading the body + _, err = Payload(resp) + if err != nil { + return resp, newBodyDownloadError(err, req) + } + return resp, err +} + +// bodyDownloadPolicyOpValues is the struct containing the per-operation values +type bodyDownloadPolicyOpValues struct { + Skip bool +} + +type bodyDownloadError struct { + err error +} + +func newBodyDownloadError(err error, req *policy.Request) error { + // on failure, only retry the request for idempotent operations. + // we currently identify them as DELETE, GET, and PUT requests. + if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { + // error is safe for retry + return err + } + // wrap error to avoid retries + return &bodyDownloadError{ + err: err, + } +} + +func (b *bodyDownloadError) Error() string { + return fmt.Sprintf("body download policy: %s", b.err.Error()) +} + +func (b *bodyDownloadError) NonRetriable() { + // marker method +} + +func (b *bodyDownloadError) Unwrap() error { + return b.err +} + +var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go new file mode 100644 index 000000000..770e0a2b6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// newHTTPHeaderPolicy creates a policy object that adds custom HTTP headers to a request +func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { + // check if any custom HTTP headers have been specified + if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { + for k, v := range header.(http.Header) { + // use Set to replace any existing value + // it also canonicalizes the header key + req.Raw().Header.Set(k, v[0]) + // add any remaining values + for i := 1; i < len(v); i++ { + req.Raw().Header.Add(k, v[i]) + } + } + } + return req.Next() +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. 
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go new file mode 100644 index 000000000..4714baa30 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -0,0 +1,34 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request +func includeResponsePolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if resp == nil { + return resp, err + } + if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil { + httpOut := httpOutRaw.(**http.Response) + *httpOut = resp + } + return resp, err +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go new file mode 100644 index 000000000..8514f57d5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -0,0 +1,263 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" +) + +type logPolicy struct { + includeBody bool + allowedHeaders map[string]struct{} + allowedQP map[string]struct{} +} + +// NewLogPolicy creates a request/response logging policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. 
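// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored azcore sources): using the two
// context helpers defined in this area, WithHTTPHeader and WithCaptureResponse,
// around a single API call. doSomething stands in for any client method built
// on this pipeline and is hypothetical; the header name and value are invented.

package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// callWithExtras decorates the context so an extra header rides along on the
// request and the raw *http.Response is captured after the call completes.
func callWithExtras(ctx context.Context, doSomething func(context.Context) error) (*http.Response, error) {
	ctx = runtime.WithHTTPHeader(ctx, http.Header{"X-Example-Correlation": []string{"abc123"}})
	var raw *http.Response
	ctx = runtime.WithCaptureResponse(ctx, &raw)
	if err := doSomething(ctx); err != nil {
		return raw, err
	}
	return raw, nil
}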
+func NewLogPolicy(o *policy.LogOptions) policy.Policy { + if o == nil { + o = &policy.LogOptions{} + } + // construct default hash set of allowed headers + allowedHeaders := map[string]struct{}{ + "accept": {}, + "cache-control": {}, + "connection": {}, + "content-length": {}, + "content-type": {}, + "date": {}, + "etag": {}, + "expires": {}, + "if-match": {}, + "if-modified-since": {}, + "if-none-match": {}, + "if-unmodified-since": {}, + "last-modified": {}, + "ms-cv": {}, + "pragma": {}, + "request-id": {}, + "retry-after": {}, + "server": {}, + "traceparent": {}, + "transfer-encoding": {}, + "user-agent": {}, + "www-authenticate": {}, + "x-ms-request-id": {}, + "x-ms-client-request-id": {}, + "x-ms-return-client-request-id": {}, + } + // add any caller-specified allowed headers to the set + for _, ah := range o.AllowedHeaders { + allowedHeaders[strings.ToLower(ah)] = struct{}{} + } + // now do the same thing for query params + allowedQP := getAllowedQueryParams(o.AllowedQueryParams) + return &logPolicy{ + includeBody: o.IncludeBody, + allowedHeaders: allowedHeaders, + allowedQP: allowedQP, + } +} + +// getAllowedQueryParams merges the default set of allowed query parameters +// with a custom set (usually comes from client options). +func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} { + allowedQP := map[string]struct{}{ + "api-version": {}, + } + for _, qp := range customAllowedQP { + allowedQP[strings.ToLower(qp)] = struct{}{} + } + return allowedQP +} + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. + var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + // Log the outgoing request as informational + if log.Should(log.EventRequest) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + var err error + if p.includeBody { + err = writeReqBody(req, b) + } + log.Write(log.EventRequest, b.String()) + if err != nil { + return nil, err + } + } + + // Set the time for this particular retry operation and then Do the operation. 
+ tryStart := time.Now() + response, err := req.Next() // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(opValues.start) + + if log.Should(log.EventResponse) { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE RECEIVED\n") + } + + p.writeRequestWithResponse(b, req, response, err) + if err != nil { + // skip frames runtime.Callers() and runtime.StackTrace() + b.WriteString(diag.StackTrace(2, 32)) + } else if p.includeBody { + err = writeRespBody(response, b) + } + log.Write(log.EventResponse, b.String()) + } + return response, err +} + +const redactedValue = "REDACTED" + +// getSanitizedURL returns a sanitized string for the provided url.URL +func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string { + // redact applicable query params + qp := u.Query() + for k := range qp { + if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok { + qp.Set(k, redactedValue) + } + } + u.RawQuery = qp.Encode() + return u.String() +} + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. +func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + value := header.Get(k) + // redact all header values not in the allow-list + if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { + value = redactedValue + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// returns true if the request/response body should be logged. +// this is determined by looking at the content-type header value. 
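// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored azcore sources): turning on
// request/response logging from application code. It assumes the public
// azcore/log package vendored earlier in this patch (the SetListener hook shown
// near the top of this section); the allow-listed header and query parameter
// names are invented for the example.

package example

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// enableRequestLogging registers a listener that receives the formatted
// request/response lines produced by the log policy above, and returns
// LogOptions that let one extra header and query parameter through redaction.
func enableRequestLogging() policy.LogOptions {
	azlog.SetListener(func(ev azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", ev, msg)
	})
	return policy.LogOptions{
		// Every header or query parameter not allow-listed is logged as REDACTED.
		AllowedHeaders:     []string{"x-example-resource-id"},
		AllowedQueryParams: []string{"timeout"},
	}
}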
+func shouldLogBody(b *bytes.Buffer, contentType string) bool { + contentType = strings.ToLower(contentType) + if strings.HasPrefix(contentType, "text") || + strings.Contains(contentType, "json") || + strings.Contains(contentType, "xml") { + return true + } + fmt.Fprintf(b, " Skip logging body for %s\n", contentType) + return false +} + +// writes to a buffer, used for logging purposes +func writeReqBody(req *policy.Request, b *bytes.Buffer) error { + if req.Raw().Body == nil { + fmt.Fprint(b, " Request contained no body\n") + return nil + } + if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { + return nil + } + body, err := io.ReadAll(req.Raw().Body) + if err != nil { + fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) + return err + } + if err := req.RewindBody(); err != nil { + return err + } + logBody(b, body) + return nil +} + +// writes to a buffer, used for logging purposes +func writeRespBody(resp *http.Response, b *bytes.Buffer) error { + ct := resp.Header.Get(shared.HeaderContentType) + if ct == "" { + fmt.Fprint(b, " Response contained no body\n") + return nil + } else if !shouldLogBody(b, ct) { + return nil + } + body, err := Payload(resp) + if err != nil { + fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) + return err + } + if len(body) > 0 { + logBody(b, body) + } else { + fmt.Fprint(b, " Response contained no body\n") + } + return nil +} + +func logBody(b *bytes.Buffer, body []byte) { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprintln(b, string(body)) + fmt.Fprintln(b, " --------------------------------------------------------------------------------") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go new file mode 100644 index 000000000..360a7f211 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +type requestIDPolicy struct{} + +// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header +func NewRequestIDPolicy() policy.Policy { + return &requestIDPolicy{} +} + +func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { + if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" { + id, err := uuid.New() + if err != nil { + return nil, err + } + req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String()) + } + + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go new file mode 100644 index 000000000..e0c5929f3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -0,0 +1,262 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +const ( + defaultMaxRetries = 3 +) + +func setDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = defaultMaxRetries + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + + // SDK guidelines specify the default MaxRetryDelay is 60 seconds + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 60 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large enough to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 800 * time.Millisecond + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } + if o.StatusCodes == nil { + // NOTE: if you change this list, you MUST update the docs in policy/policy.go + o.StatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + } +} + +func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 + pow := func(number int64, exponent int32) int64 { // pow is nested helper function + var result int64 = 1 + for n := int32(0); n < exponent; n++ { + result *= number + } + return result + } + + delay := time.Duration(pow(2, try)-1) * o.RetryDelay + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} + +// NewRetryPolicy creates a policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { + if o == nil { + o = &policy.RetryOptions{} + } + p := &retryPolicy{options: *o} + return p +} + +type retryPolicy struct { + options policy.RetryOptions +} + +func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + options := p.options + // check if the retry options have been overridden for this call + if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { + options = override.(policy.RetryOptions) + } + setDefaults(&options) + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) + // When to retry: connection failure or temporary/timeout. + var rwbody *retryableRequestBody + if req.Body() != nil { + // wrap the body so we control when it's actually closed. + // do this outside the for loop so defers don't accumulate. + rwbody = &retryableRequestBody{body: req.Body()} + defer rwbody.realClose() + } + try := int32(1) + for { + resp = nil // reset + log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries. 
+ err = req.RewindBody() + if err != nil { + return + } + // RewindBody() restores Raw().Body to its original state, so set our rewindable after + if rwbody != nil { + req.Raw().Body = rwbody + } + + if options.TryTimeout == 0 { + clone := req.Clone(req.Raw().Context()) + resp, err = clone.Next() + } else { + // Set the per-try time for this particular retry operation and then Do the operation. + tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) + clone := req.Clone(tryCtx) + resp, err = clone.Next() // Make the request + // if the body was already downloaded or there was an error it's safe to cancel the context now + if err != nil { + tryCancel() + } else if exported.PayloadDownloaded(resp) { + tryCancel() + } else { + // must cancel the context after the body has been read and closed + resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} + } + } + if err == nil { + log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) + } else { + log.Writef(log.EventRetryPolicy, "error %v", err) + } + + if ctxErr := req.Raw().Context().Err(); ctxErr != nil { + // don't retry if the parent context has been cancelled or its deadline exceeded + err = ctxErr + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + + // check if the error is not retriable + var nre errorinfo.NonRetriable + if errors.As(err, &nre) { + // the error says it's not retriable so don't retry + log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) + return + } + + if options.ShouldRetry != nil { + // a non-nil ShouldRetry overrides our HTTP status code check + if !options.ShouldRetry(resp, err) { + // predicate says we shouldn't retry + log.Write(log.EventRetryPolicy, "exit due to ShouldRetry") + return + } + } else if err == nil && !HasStatusCode(resp, options.StatusCodes...) { + // if there is no error and the response code isn't in the list of retry codes then we're done. + log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") + return + } + + if try == options.MaxRetries+1 { + // max number of tries has been reached, don't sleep again + log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) + return + } + + // use the delay from retry-after if available + delay := shared.RetryAfter(resp) + if delay <= 0 { + delay = calcDelay(options, try) + } else if delay > options.MaxRetryDelay { + // the retry-after delay exceeds the the cap so don't retry + log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) + return + } + + // drain before retrying so nothing is leaked + Drain(resp) + + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) + select { + case <-time.After(delay): + try++ + case <-req.Raw().Context().Done(): + err = req.Raw().Context().Err() + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + } +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. 
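// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored azcore sources): overriding the
// retry policy for a single call by stashing RetryOptions in the context via
// WithRetryOptions, including a ShouldRetry predicate. The 404-while-propagating
// scenario is hypothetical.

package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// withAggressiveRetries returns a context that makes the retry policy above use
// these options for the enclosed call instead of the client-wide defaults.
func withAggressiveRetries(ctx context.Context) context.Context {
	return runtime.WithRetryOptions(ctx, policy.RetryOptions{
		MaxRetries: 6,
		// ShouldRetry replaces the status-code check; context cancellation and
		// NonRetriable errors are still honored before it runs.
		ShouldRetry: func(resp *http.Response, err error) bool {
			if err != nil {
				return true // transport-level failure: let the policy try again
			}
			return resp.StatusCode == http.StatusNotFound || resp.StatusCode >= 500
		},
	})
}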
+func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from an inner stream and reports progress +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The retry policy closes the request body upon success. + return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following type/methods implement the contextCancelReadCloser + +// contextCancelReadCloser combines an io.ReadCloser with a cancel func. +// it ensures the cancel func is invoked once the body has been read and closed. +type contextCancelReadCloser struct { + cf context.CancelFunc + body io.ReadCloser +} + +func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { + return rc.body.Read(p) +} + +func (rc *contextCancelReadCloser) Close() error { + err := rc.body.Close() + rc.cf() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go new file mode 100644 index 000000000..2abcdc576 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "net/http" + "os" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +type telemetryPolicy struct { + telemetryValue string +} + +// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. +// The format is [ ]azsdk-go-/ . +// Pass nil to accept the default values; this is the same as passing a zero-value options. 
+func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { + if o == nil { + o = &policy.TelemetryOptions{} + } + tp := telemetryPolicy{} + if o.Disabled { + return &tp + } + b := &bytes.Buffer{} + // normalize ApplicationID + if o.ApplicationID != "" { + o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") + if len(o.ApplicationID) > 24 { + o.ApplicationID = o.ApplicationID[:24] + } + b.WriteString(o.ApplicationID) + b.WriteRune(' ') + } + b.WriteString(formatTelemetry(mod, ver)) + b.WriteRune(' ') + b.WriteString(platformInfo) + tp.telemetryValue = b.String() + return &tp +} + +func formatTelemetry(comp, ver string) string { + return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) +} + +func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { + if p.telemetryValue == "" { + return req.Next() + } + // preserve the existing User-Agent string + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) + } + req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) + return req.Next() +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go new file mode 100644 index 000000000..3d029a3d1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -0,0 +1,327 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia = pollers.FinalStateVia + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation = pollers.FinalStateViaLocation + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. 
+ FinalStateViaOpLocation = pollers.FinalStateViaOpLocation +) + +// NewPollerOptions contains the optional parameters for NewPoller. +type NewPollerOptions[T any] struct { + // FinalStateVia contains the final-state-via value for the LRO. + FinalStateVia FinalStateVia + + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPoller creates a Poller based on the provided initial response. +func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + if options.Handler != nil { + return &Poller[T]{ + op: options.Handler, + resp: resp, + result: result, + }, nil + } + + defer resp.Body.Close() + // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). + // ideally the codegen should return an error if the initial response failed and not even create a poller. + if !poller.StatusCodeValid(resp) { + return nil, errors.New("the operation failed or was cancelled") + } + + // determine the polling method + var opr PollingHandler[T] + var err error + if async.Applicable(resp) { + // async poller must be checked first as it can also have a location header + opr, err = async.New[T](pl, resp, options.FinalStateVia) + } else if op.Applicable(resp) { + // op poller must be checked before loc as it can also have a location header + opr, err = op.New[T](pl, resp, options.FinalStateVia) + } else if loc.Applicable(resp) { + opr, err = loc.New[T](pl, resp) + } else if body.Applicable(resp) { + // must test body poller last as it's a subset of the other pollers. + // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) + opr, err = body.New[T](pl, resp) + } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { + // if we get here it means we have a 202 with no polling headers. + // for DELETE and POST this is a hard error per ARM RPC spec. + return nil, errors.New("response is missing polling URL") + } else { + opr, err = pollers.NewNopPoller[T](resp) + } + + if err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + resp: resp, + result: result, + }, nil +} + +// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. +type NewPollerFromResumeTokenOptions[T any] struct { + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPollerFromResumeToken creates a Poller from a resume token string. 
+func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerFromResumeTokenOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + + if err := pollers.IsTokenValid[T](token); err != nil { + return nil, err + } + raw, err := pollers.ExtractToken(token) + if err != nil { + return nil, err + } + var asJSON map[string]interface{} + if err := json.Unmarshal(raw, &asJSON); err != nil { + return nil, err + } + + opr := options.Handler + // now rehydrate the poller based on the encoded poller type + if opr != nil { + log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) + } else if async.CanResume(asJSON) { + opr, _ = async.New[T](pl, nil, "") + } else if body.CanResume(asJSON) { + opr, _ = body.New[T](pl, nil) + } else if loc.CanResume(asJSON) { + opr, _ = loc.New[T](pl, nil) + } else if op.CanResume(asJSON) { + opr, _ = op.New[T](pl, nil, "") + } else { + return nil, fmt.Errorf("unhandled poller token %s", string(raw)) + } + if err := json.Unmarshal(raw, &opr); err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + result: result, + }, nil +} + +// PollingHandler[T] abstracts the differences among poller implementations. +type PollingHandler[T any] interface { + // Done returns true if the LRO has reached a terminal state. + Done() bool + + // Poll fetches the latest state of the LRO. + Poll(context.Context) (*http.Response, error) + + // Result is called once the LRO has reached a terminal state. It populates the out parameter + // with the result of the operation. + Result(ctx context.Context, out *T) error +} + +// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +type Poller[T any] struct { + op PollingHandler[T] + resp *http.Response + err error + result *T + done bool +} + +// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. +type PollUntilDoneOptions struct { + // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. + // Pass zero to accept the default value (30s). + Frequency time.Duration +} + +// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. +// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. +// options: pass nil to accept the default values. +// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might +// benefit from a shorter or longer duration. 
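// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored azcore sources): the shape of a
// "Begin*" client method built on the Poller defined above. The widgetResponse
// type, pipeline, and initial response are stand-ins supplied by the caller.

package example

import (
	"context"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widgetResponse is a hypothetical payload type for the final LRO result.
type widgetResponse struct {
	ID    string `json:"id"`
	State string `json:"state"`
}

// beginCreateWidget wraps the service's initial response in a Poller and blocks
// in PollUntilDone, polling every five seconds unless the service supplies a
// Retry-After header.
func beginCreateWidget(ctx context.Context, pl runtime.Pipeline, initialResp *http.Response) (widgetResponse, error) {
	poller, err := runtime.NewPoller[widgetResponse](initialResp, pl, nil)
	if err != nil {
		return widgetResponse{}, err
	}
	return poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: 5 * time.Second})
}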
+func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { + if options == nil { + options = &PollUntilDoneOptions{} + } + cp := *options + if cp.Frequency == 0 { + cp.Frequency = 30 * time.Second + } + + // skip the floor check when executing tests so they don't take so long + if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { + return *new(T), errors.New("polling frequency minimum is one second") + } + + start := time.Now() + logPollUntilDoneExit := func(v interface{}) { + log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) + } + log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) + if p.resp != nil { + // initial check for a retry-after header existing on the initial response + if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { + log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) + if err := shared.Delay(ctx, retryAfter); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } + } + // begin polling the endpoint until a terminal state is reached + for { + resp, err := p.Poll(ctx) + if err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + if p.Done() { + logPollUntilDoneExit("succeeded") + return p.Result(ctx) + } + d := cp.Frequency + if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { + log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) + d = retryAfter + } else { + log.Writef(log.EventLRO, "delay for %s", d.String()) + } + if err = shared.Delay(ctx, d); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } +} + +// Poll fetches the latest state of the LRO. It returns an HTTP response or error. +// If Poll succeeds, the poller's state is updated and the HTTP response is returned. +// If Poll fails, the poller's state is unmodified and the error is returned. +// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + if p.Done() { + // the LRO has reached a terminal state, don't poll again + return p.resp, nil + } + resp, err := p.op.Poll(ctx) + if err != nil { + return nil, err + } + p.resp = resp + return p.resp, nil +} + +// Done returns true if the LRO has reached a terminal state. +// Once a terminal state is reached, call Result(). +func (p *Poller[T]) Done() bool { + return p.op.Done() +} + +// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. +// If the LRO completed successfully, a populated instance of T is returned. +// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. +// Calling this on an LRO in a non-terminal state will return an error. +func (p *Poller[T]) Result(ctx context.Context) (T, error) { + if !p.Done() { + return *new(T), errors.New("poller is in a non-terminal state") + } + if p.done { + // the result has already been retrieved, return the cached value + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil + } + err := p.op.Result(ctx, p.result) + var respErr *exported.ResponseError + if errors.As(err, &respErr) { + // the LRO failed. 
record the error + p.err = err + } else if err != nil { + // the call to Result failed, don't cache anything in this case + return *new(T), err + } + p.done = true + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil +} + +// ResumeToken returns a value representing the poller that can be used to resume +// the LRO at a later time. ResumeTokens are unique per service operation. +// The token's format should be considered opaque and is subject to change. +// Calling this on an LRO in a terminal state will return an error. +func (p *Poller[T]) ResumeToken() (string, error) { + if p.Done() { + return "", errors.New("poller is in a terminal state") + } + tk, err := pollers.NewResumeToken[T](p.op) + if err != nil { + return "", err + } + return tk, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go new file mode 100644 index 000000000..98e007184 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -0,0 +1,248 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "mime/multipart" + "os" + "path" + "reflect" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// NewRequest creates a new policy.Request with the specified input. +// The endpoint MUST be properly encoded before calling this function. +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { + return exported.NewRequest(ctx, httpMethod, endpoint) +} + +// JoinPaths concatenates multiple URL path segments into one path, +// inserting path separation characters as required. JoinPaths will preserve +// query parameters in the root path +func JoinPaths(root string, paths ...string) string { + if len(paths) == 0 { + return root + } + + qps := "" + if strings.Contains(root, "?") { + splitPath := strings.Split(root, "?") + root, qps = splitPath[0], splitPath[1] + } + + p := path.Join(paths...) + // path.Join will remove any trailing slashes. + // if one was provided, preserve it. + if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") { + p += "/" + } + + if qps != "" { + p = p + "?" + qps + } + + if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") { + root = root[:len(root)-1] + } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") { + p = "/" + p + } + return root + p +} + +// EncodeByteArray will base-64 encode the byte slice v. 
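// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored azcore sources): the JoinPaths
// behavior documented above, using a placeholder endpoint. Segments are joined
// with single slashes and a query string on the root survives the join.

package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// joinPathsDemo prints the joined URL for a root that already carries a query string.
func joinPathsDemo() {
	u := runtime.JoinPaths("https://example.contoso.com/base?api-version=2023-01-01", "widgets", "widget-1")
	fmt.Println(u) // https://example.contoso.com/base/widgets/widget-1?api-version=2023-01-01
}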
+func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + +// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. +// The encoded value is treated as a JSON string. +func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { + // send as a JSON string + encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) + return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON) +} + +// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. +func MarshalAsJSON(req *policy.Request, v interface{}) error { + if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" { + v = cloneWithoutReadOnlyFields(v) + } + b, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON) +} + +// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. +func MarshalAsXML(req *policy.Request, v interface{}) error { + b, err := xml.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // inclue the XML header as some services require it + b = []byte(xml.Header + string(b)) + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) +} + +// SetMultipartFormData writes the specified keys/values as multi-part form +// fields with the specified value. File content must be specified as a ReadSeekCloser. +// All other values are treated as string values. +func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { + body := bytes.Buffer{} + writer := multipart.NewWriter(&body) + + writeContent := func(fieldname, filename string, src io.Reader) error { + fd, err := writer.CreateFormFile(fieldname, filename) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, src); err != nil { + return err + } + return nil + } + + for k, v := range formData { + if rsc, ok := v.(io.ReadSeekCloser); ok { + if err := writeContent(k, k, rsc); err != nil { + return err + } + continue + } else if rscs, ok := v.([]io.ReadSeekCloser); ok { + for _, rsc := range rscs { + if err := writeContent(k, k, rsc); err != nil { + return err + } + } + continue + } + // ensure the value is in string format + s, ok := v.(string) + if !ok { + s = fmt.Sprintf("%v", v) + } + if err := writer.WriteField(k, s); err != nil { + return err + } + } + if err := writer.Close(); err != nil { + return err + } + return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType()) +} + +// SkipBodyDownload will disable automatic downloading of the response body. +func SkipBodyDownload(req *policy.Request) { + req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) +} + +// returns a clone of the object graph pointed to by v, omitting values of all read-only +// fields. if there are no read-only fields in the object graph, no clone is created. +func cloneWithoutReadOnlyFields(v interface{}) interface{} { + val := reflect.Indirect(reflect.ValueOf(v)) + if val.Kind() != reflect.Struct { + // not a struct, skip + return v + } + // first walk the graph to find any R/O fields. + // if there aren't any, skip cloning the graph. 
+ if !recursiveFindReadOnlyField(val) { + return v + } + return recursiveCloneWithoutReadOnlyFields(val) +} + +// returns true if any field in the object graph of val contains the `azure:"ro"` tag value +func recursiveFindReadOnlyField(val reflect.Value) bool { + t := val.Type() + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + return true + } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) { + return true + } + } + return false +} + +// clones the object graph of val. all non-R/O properties are copied to the clone +func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} { + t := val.Type() + clone := reflect.New(t) + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + // omit from payload + continue + } + // clone field will receive the same value as the source field... + value := val.Field(i) + v := reflect.Indirect(value) + if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct { + // ...unless the source value is a struct, in which case we recurse to clone that struct. + // (We can't recursively clone time.Time because it contains unexported fields.) + c := recursiveCloneWithoutReadOnlyFields(v) + if field.Anonymous { + // NOTE: this does not handle the case of embedded fields of unexported struct types. + // this should be ok as we don't generate any code like this at present + value = reflect.Indirect(reflect.ValueOf(c)) + } else { + value = reflect.ValueOf(c) + } + } + reflect.Indirect(clone).Field(i).Set(value) + } + return clone.Interface() +} + +// returns true if the "azure" tag contains the option "ro" +func azureTagIsReadOnly(tag string) bool { + if tag == "" { + return false + } + parts := strings.Split(tag, ",") + for _, part := range parts { + if part == "ro" { + return true + } + } + return false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go new file mode 100644 index 000000000..d1f58e9e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -0,0 +1,135 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +func Payload(resp *http.Response) ([]byte, error) { + return exported.Payload(resp, nil) +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + return exported.HasStatusCode(resp, statusCodes...) +} + +// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v. 
+func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error { + p, err := Payload(resp) + if err != nil { + return err + } + return DecodeByteArray(string(p), v, format) +} + +// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsJSON(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = json.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsXML(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = xml.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// Drain reads the response body to completion then closes it. The bytes read are discarded. +func Drain(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +// removeBOM removes any byte-order mark prefix from the payload if present. +func removeBOM(resp *http.Response) error { + _, err := exported.Payload(resp, &exported.PayloadOptions{ + BytesModifier: func(b []byte) []byte { + // UTF8 + return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + }, + }) + if err != nil { + return err + } + return nil +} + +// DecodeByteArray will base-64 decode the provided string into v. +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go new file mode 100644 index 000000000..869bed511 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +var defaultHTTPClient *http.Client + +func init() { + defaultTransport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + defaultHTTPClient = &http.Client{ + Transport: defaultTransport, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go new file mode 100644 index 000000000..cadaef3d5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package streaming contains helpers for streaming IO operations and progress reporting. +package streaming diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go new file mode 100644 index 000000000..fbcd48311 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package streaming + +import ( + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +type progress struct { + rc io.ReadCloser + rsc io.ReadSeekCloser + pr func(bytesTransferred int64) + offset int64 +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an +// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReedSeekCloser +// has its underlying stream closed. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return exported.NopCloser(rs) +} + +// NewRequestProgress adds progress reporting to an HTTP request's body stream. +func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser { + return &progress{ + rc: body, + rsc: body, + pr: pr, + offset: 0, + } +} + +// NewResponseProgress adds progress reporting to an HTTP response's body stream. +func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser { + return &progress{ + rc: body, + rsc: nil, + pr: pr, + offset: 0, + } +} + +// Read reads a block of data from an inner stream and reports progress +func (p *progress) Read(b []byte) (n int, err error) { + n, err = p.rc.Read(b) + if err != nil && err != io.EOF { + return + } + p.offset += int64(n) + // Invokes the user's callback method to report progress + p.pr(p.offset) + return +} + +// Seek only expects a zero or from beginning. 
+func (p *progress) Seek(offset int64, whence int) (int64, error) { + // This should only ever be called with offset = 0 and whence = io.SeekStart + n, err := p.rsc.Seek(offset, whence) + if err == nil { + p.offset = int64(n) + } + return n, err +} + +// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it. +func (p *progress) Close() error { + return p.rc.Close() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go new file mode 100644 index 000000000..80282d4ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tracing + +// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends. +type SpanKind int + +const ( + // SpanKindInternal indicates the span represents an internal operation within an application. + SpanKindInternal SpanKind = 1 + + // SpanKindServer indicates the span covers server-side handling of a request. + SpanKindServer SpanKind = 2 + + // SpanKindClient indicates the span describes a request to a remote service. + SpanKindClient SpanKind = 3 + + // SpanKindProducer indicates the span was created by a messaging producer. + SpanKindProducer SpanKind = 4 + + // SpanKindConsumer indicates the span was created by a messaging consumer. + SpanKindConsumer SpanKind = 5 +) + +// SpanStatus represents the status of a span. +type SpanStatus int + +const ( + // SpanStatusUnset is the default status code. + SpanStatusUnset SpanStatus = 0 + + // SpanStatusError indicates the operation contains an error. + SpanStatusError SpanStatus = 1 + + // SpanStatusOK indicates the operation completed successfully. + SpanStatusOK SpanStatus = 2 +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go new file mode 100644 index 000000000..75f757ced --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -0,0 +1,168 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package tracing contains the definitions needed to support distributed tracing. +package tracing + +import ( + "context" +) + +// ProviderOptions contains the optional values when creating a Provider. +type ProviderOptions struct { + // for future expansion +} + +// NewProvider creates a new Provider with the specified values. +// - newTracerFn is the underlying implementation for creating Tracer instances +// - options contains optional values; pass nil to accept the default value +func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider { + return Provider{ + newTracerFn: newTracerFn, + } +} + +// Provider is the factory that creates Tracer instances. +// It defaults to a no-op provider. +type Provider struct { + newTracerFn func(name, version string) Tracer +} + +// NewTracer creates a new Tracer for the specified name and version. 
+// - name - the name of the tracer object, typically the fully qualified name of the service client +// - version - the version of the module in which the service client resides +func (p Provider) NewTracer(name, version string) (tracer Tracer) { + if p.newTracerFn != nil { + tracer = p.newTracerFn(name, version) + } + return +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// TracerOptions contains the optional values when creating a Tracer. +type TracerOptions struct { + // for future expansion +} + +// NewTracer creates a Tracer with the specified values. +// - newSpanFn is the underlying implementation for creating Span instances +// - options contains optional values; pass nil to accept the default value +func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + return Tracer{ + newSpanFn: newSpanFn, + } +} + +// Tracer is the factory that creates Span instances. +type Tracer struct { + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) +} + +// Start creates a new span and a context.Context that contains it. +// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span +// - spanName identifies the span within a trace, it's typically the fully qualified API name +// - options contains optional values for the span, pass nil to accept any defaults +func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { + if t.newSpanFn != nil { + return t.newSpanFn(ctx, spanName, options) + } + return ctx, Span{} +} + +// SpanOptions contains optional settings for creating a span. +type SpanOptions struct { + // Kind indicates the kind of Span. + Kind SpanKind + + // Attributes contains key-value pairs of attributes for the span. + Attributes []Attribute +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// SpanImpl abstracts the underlying implementation for Span, +// allowing it to work with various tracing implementations. +// Any zero-values will have their default, no-op behavior. +type SpanImpl struct { + // End contains the implementation for the Span.End method. + End func() + + // SetAttributes contains the implementation for the Span.SetAttributes method. + SetAttributes func(...Attribute) + + // AddEvent contains the implementation for the Span.AddEvent method. + AddEvent func(string, ...Attribute) + + // AddError contains the implementation for the Span.AddError method. + AddError func(err error) + + // SetStatus contains the implementation for the Span.SetStatus method. + SetStatus func(SpanStatus, string) +} + +// NewSpan creates a Span with the specified implementation. +func NewSpan(impl SpanImpl) Span { + return Span{ + impl: impl, + } +} + +// Span is a single unit of a trace. A trace can contain multiple spans. +// A zero-value Span provides a no-op implementation. +type Span struct { + impl SpanImpl +} + +// End terminates the span and MUST be called before the span leaves scope. +// Any further updates to the span will be ignored after End is called. +func (s Span) End() { + if s.impl.End != nil { + s.impl.End() + } +} + +// SetAttributes sets the specified attributes on the Span. +// Any existing attributes with the same keys will have their values overwritten. 
+func (s Span) SetAttributes(attrs ...Attribute) { + if s.impl.SetAttributes != nil { + s.impl.SetAttributes(attrs...) + } +} + +// AddEvent adds a named event with an optional set of attributes to the span. +func (s Span) AddEvent(name string, attrs ...Attribute) { + if s.impl.AddEvent != nil { + s.impl.AddEvent(name, attrs...) + } +} + +// AddError adds the specified error event to the span. +func (s Span) AddError(err error) { + if s.impl.AddError != nil { + s.impl.AddError(err) + } +} + +// SetStatus sets the status on the span along with a description. +func (s Span) SetStatus(code SpanStatus, desc string) { + if s.impl.SetStatus != nil { + s.impl.SetStatus(code, desc) + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Attribute is a key-value pair. +type Attribute struct { + // Key is the name of the attribute. + Key string + + // Value is the attribute's value. + // Types that are natively supported include int64, float64, int, bool, string. + // Any other type will be formatted per rules of fmt.Sprintf("%v"). + Value any +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md new file mode 100644 index 000000000..cc8034cf7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -0,0 +1,409 @@ +# Release History + +## 1.3.0 (2023-05-09) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.3.0-beta.5 +* Renamed `NewOnBehalfOfCredentialFromCertificate` to `NewOnBehalfOfCredentialWithCertificate` +* Renamed `NewOnBehalfOfCredentialFromSecret` to `NewOnBehalfOfCredentialWithSecret` + +### Other Changes +* Upgraded to MSAL v1.0.0 + +## 1.3.0-beta.5 (2023-04-11) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.3.0-beta.4 +* Moved `NewWorkloadIdentityCredential()` parameters into `WorkloadIdentityCredentialOptions`. + The constructor now reads default configuration from environment variables set by the Azure + workload identity webhook by default. + ([#20478](https://github.com/Azure/azure-sdk-for-go/pull/20478)) +* Removed CAE support. It will return in v1.4.0-beta.1 + ([#20479](https://github.com/Azure/azure-sdk-for-go/pull/20479)) + +### Bugs Fixed +* Fixed an issue in `DefaultAzureCredential` that could cause the managed identity endpoint check to fail in rare circumstances. + +## 1.3.0-beta.4 (2023-03-08) + +### Features Added +* Added `WorkloadIdentityCredentialOptions.AdditionallyAllowedTenants` and `.DisableInstanceDiscovery` + +### Bugs Fixed +* Credentials now synchronize within `GetToken()` so a single instance can be shared among goroutines + ([#20044](https://github.com/Azure/azure-sdk-for-go/issues/20044)) + +### Other Changes +* Upgraded dependencies + +## 1.2.2 (2023-03-07) + +### Other Changes +* Upgraded dependencies + +## 1.3.0-beta.3 (2023-02-07) + +### Features Added +* By default, credentials set client capability "CP1" to enable support for + [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). + This indicates to Azure Active Directory that your application can handle CAE claims challenges. + You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". 
+* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login + prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599)) +* Service principal and user credentials support ADFS authentication on Azure Stack. + Specify "adfs" as the credential's tenant. +* Applications running in private or disconnected clouds can prevent credentials from + requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery` + field on credential options. +* Many credentials can now be configured to authenticate in multiple tenants. The + options types for these credentials have an `AdditionallyAllowedTenants` field + that specifies additional tenants in which the credential may authenticate. + +## 1.3.0-beta.2 (2023-01-10) + +### Features Added +* Added `OnBehalfOfCredential` to support the on-behalf-of flow + ([#16642](https://github.com/Azure/azure-sdk-for-go/issues/16642)) + +### Bugs Fixed +* `AzureCLICredential` reports token expiration in local time (should be UTC) + +### Other Changes +* `AzureCLICredential` imposes its default timeout only when the `Context` + passed to `GetToken()` has no deadline +* Added `NewCredentialUnavailableError()`. This function constructs an error indicating + a credential can't authenticate and an encompassing `ChainedTokenCredential` should + try its next credential, if any. + +## 1.3.0-beta.1 (2022-12-13) + +### Features Added +* `WorkloadIdentityCredential` and `DefaultAzureCredential` support + Workload Identity Federation on Kubernetes. `DefaultAzureCredential` + support requires environment variable configuration as set by the + Workload Identity webhook. + ([#15615](https://github.com/Azure/azure-sdk-for-go/issues/15615)) + +## 1.2.0 (2022-11-08) + +### Other Changes +* This version includes all fixes and features from 1.2.0-beta.* + +## 1.2.0-beta.3 (2022-10-11) + +### Features Added +* `ManagedIdentityCredential` caches tokens in memory + +### Bugs Fixed +* `ClientCertificateCredential` sends only the leaf cert for SNI authentication + +## 1.2.0-beta.2 (2022-08-10) + +### Features Added +* Added `ClientAssertionCredential` to enable applications to authenticate + with custom client assertions + +### Other Changes +* Updated AuthenticationFailedError with links to TROUBLESHOOTING.md for relevant errors +* Upgraded `microsoft-authentication-library-for-go` requirement to v0.6.0 + +## 1.2.0-beta.1 (2022-06-07) + +### Features Added +* `EnvironmentCredential` reads certificate passwords from `AZURE_CLIENT_CERTIFICATE_PASSWORD` + ([#17099](https://github.com/Azure/azure-sdk-for-go/pull/17099)) + +## 1.1.0 (2022-06-07) + +### Features Added +* `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party + applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a + region name. + ([#15605](https://github.com/Azure/azure-sdk-for-go/issues/15605)) + +## 1.0.1 (2022-06-07) + +### Other Changes +* Upgrade `microsoft-authentication-library-for-go` requirement to v0.5.1 + ([#18176](https://github.com/Azure/azure-sdk-for-go/issues/18176)) + +## 1.0.0 (2022-05-12) + +### Features Added +* `DefaultAzureCredential` reads environment variable `AZURE_CLIENT_ID` for the + client ID of a user-assigned managed identity + ([#17293](https://github.com/Azure/azure-sdk-for-go/pull/17293)) + +### Breaking Changes +* Removed `AuthorizationCodeCredential`. Use `InteractiveBrowserCredential` instead + to authenticate a user with the authorization code flow. 
+* Instances of `AuthenticationFailedError` are now returned by pointer. +* `GetToken()` returns `azcore.AccessToken` by value + +### Bugs Fixed +* `AzureCLICredential` panics after receiving an unexpected error type + ([#17490](https://github.com/Azure/azure-sdk-for-go/issues/17490)) + +### Other Changes +* `GetToken()` returns an error when the caller specifies no scope +* Updated to the latest versions of `golang.org/x/crypto`, `azcore` and `internal` + +## 0.14.0 (2022-04-05) + +### Breaking Changes +* This module now requires Go 1.18 +* Removed `AuthorityHost`. Credentials are now configured for sovereign or private + clouds with the API in `azcore/cloud`, for example: + ```go + // before + opts := azidentity.ClientSecretCredentialOptions{AuthorityHost: azidentity.AzureGovernment} + cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts) + + // after + import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + + opts := azidentity.ClientSecretCredentialOptions{} + opts.Cloud = cloud.AzureGovernment + cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts) + ``` + +## 0.13.2 (2022-03-08) + +### Bugs Fixed +* Prevented a data race in `DefaultAzureCredential` and `ChainedTokenCredential` + ([#17144](https://github.com/Azure/azure-sdk-for-go/issues/17144)) + +### Other Changes +* Upgraded App Service managed identity version from 2017-09-01 to 2019-08-01 + ([#17086](https://github.com/Azure/azure-sdk-for-go/pull/17086)) + +## 0.13.1 (2022-02-08) + +### Features Added +* `EnvironmentCredential` supports certificate SNI authentication when + `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN` is "true". + ([#16851](https://github.com/Azure/azure-sdk-for-go/pull/16851)) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken()` now returns an error when configured for + a user assigned identity in Azure Cloud Shell (which doesn't support such identities) + ([#16946](https://github.com/Azure/azure-sdk-for-go/pull/16946)) + +### Other Changes +* `NewDefaultAzureCredential()` logs non-fatal errors. These errors are also included in the + error returned by `DefaultAzureCredential.GetToken()` when it's unable to acquire a token + from any source. ([#15923](https://github.com/Azure/azure-sdk-for-go/issues/15923)) + +## 0.13.0 (2022-01-11) + +### Breaking Changes +* Replaced `AuthenticationFailedError.RawResponse()` with a field having the same name +* Unexported `CredentialUnavailableError` +* Instances of `ChainedTokenCredential` will now skip looping through the list of source credentials and re-use the first successful credential on subsequent calls to `GetToken`. + * If `ChainedTokenCredentialOptions.RetrySources` is true, `ChainedTokenCredential` will continue to try all of the originally provided credentials each time the `GetToken` method is called. + * `ChainedTokenCredential.successfulCredential` will contain a reference to the last successful credential. + * `DefaultAzureCredenial` will also re-use the first successful credential on subsequent calls to `GetToken`. + * `DefaultAzureCredential.chain.successfulCredential` will also contain a reference to the last successful credential. + +### Other Changes +* `ManagedIdentityCredential` no longer probes IMDS before requesting a token + from it. Also, an error response from IMDS no longer disables a credential + instance. Following an error, a credential instance will continue to send + requests to IMDS as necessary. 
+* Adopted MSAL for user and service principal authentication +* Updated `azcore` requirement to 0.21.0 + +## 0.12.0 (2021-11-02) +### Breaking Changes +* Raised minimum go version to 1.16 +* Removed `NewAuthenticationPolicy()` from credentials. Clients should instead use azcore's + `runtime.NewBearerTokenPolicy()` to construct a bearer token authorization policy. +* The `AuthorityHost` field in credential options structs is now a custom type, + `AuthorityHost`, with underlying type `string` +* `NewChainedTokenCredential` has a new signature to accommodate a placeholder + options struct: + ```go + // before + cred, err := NewChainedTokenCredential(credA, credB) + + // after + cred, err := NewChainedTokenCredential([]azcore.TokenCredential{credA, credB}, nil) + ``` +* Removed `ExcludeAzureCLICredential`, `ExcludeEnvironmentCredential`, and `ExcludeMSICredential` + from `DefaultAzureCredentialOptions` +* `NewClientCertificateCredential` requires a `[]*x509.Certificate` and `crypto.PrivateKey` instead of + a path to a certificate file. Added `ParseCertificates` to simplify getting these in common cases: + ```go + // before + cred, err := NewClientCertificateCredential("tenant", "client-id", "/cert.pem", nil) + + // after + certData, err := os.ReadFile("/cert.pem") + certs, key, err := ParseCertificates(certData, password) + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil) + ``` +* Removed `InteractiveBrowserCredentialOptions.ClientSecret` and `.Port` +* Removed `AADAuthenticationFailedError` +* Removed `id` parameter of `NewManagedIdentityCredential()`. User assigned identities are now + specified by `ManagedIdentityCredentialOptions.ID`: + ```go + // before + cred, err := NewManagedIdentityCredential("client-id", nil) + // or, for a resource ID + opts := &ManagedIdentityCredentialOptions{ID: ResourceID} + cred, err := NewManagedIdentityCredential("/subscriptions/...", opts) + + // after + clientID := ClientID("7cf7db0d-...") + opts := &ManagedIdentityCredentialOptions{ID: clientID} + // or, for a resource ID + resID: ResourceID("/subscriptions/...") + opts := &ManagedIdentityCredentialOptions{ID: resID} + cred, err := NewManagedIdentityCredential(opts) + ``` +* `DeviceCodeCredentialOptions.UserPrompt` has a new type: `func(context.Context, DeviceCodeMessage) error` +* Credential options structs now embed `azcore.ClientOptions`. In addition to changing literal initialization + syntax, this change renames `HTTPClient` fields to `Transport`. +* Renamed `LogCredential` to `EventCredential` +* `AzureCLICredential` no longer reads the environment variable `AZURE_CLI_PATH` +* `NewManagedIdentityCredential` no longer reads environment variables `AZURE_CLIENT_ID` and + `AZURE_RESOURCE_ID`. Use `ManagedIdentityCredentialOptions.ID` instead. +* Unexported `AuthenticationFailedError` and `CredentialUnavailableError` structs. In their place are two + interfaces having the same names. 
+ +### Bugs Fixed +* `AzureCLICredential.GetToken` no longer mutates its `opts.Scopes` + +### Features Added +* Added connection configuration options to `DefaultAzureCredentialOptions` +* `AuthenticationFailedError.RawResponse()` returns the HTTP response motivating the error, + if available + +### Other Changes +* `NewDefaultAzureCredential()` returns `*DefaultAzureCredential` instead of `*ChainedTokenCredential` +* Added `TenantID` field to `DefaultAzureCredentialOptions` and `AzureCLICredentialOptions` + +## 0.11.0 (2021-09-08) +### Breaking Changes +* Unexported `AzureCLICredentialOptions.TokenProvider` and its type, + `AzureCLITokenProvider` + +### Bug Fixes +* `ManagedIdentityCredential.GetToken` returns `CredentialUnavailableError` + when IMDS has no assigned identity, signaling `DefaultAzureCredential` to + try other credentials + + +## 0.10.0 (2021-08-30) +### Breaking Changes +* Update based on `azcore` refactor [#15383](https://github.com/Azure/azure-sdk-for-go/pull/15383) + +## 0.9.3 (2021-08-20) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken` no longer mutates its `opts.Scopes` + +### Other Changes +* Bumps version of `azcore` to `v0.18.1` + + +## 0.9.2 (2021-07-23) +### Features Added +* Adding support for Service Fabric environment in `ManagedIdentityCredential` +* Adding an option for using a resource ID instead of client ID in `ManagedIdentityCredential` + + +## 0.9.1 (2021-05-24) +### Features Added +* Add LICENSE.txt and bump version information + + +## 0.9.0 (2021-05-21) +### Features Added +* Add support for authenticating in Azure Stack environments +* Enable user assigned identities for the IMDS scenario in `ManagedIdentityCredential` +* Add scope to resource conversion in `GetToken()` on `ManagedIdentityCredential` + + +## 0.8.0 (2021-01-20) +### Features Added +* Updating documentation + + +## 0.7.1 (2021-01-04) +### Features Added +* Adding port option to `InteractiveBrowserCredential` + + +## 0.7.0 (2020-12-11) +### Features Added +* Add `redirectURI` parameter back to authentication code flow + + +## 0.6.1 (2020-12-09) +### Features Added +* Updating query parameter in `ManagedIdentityCredential` and updating datetime string for parsing managed identity access tokens. + + +## 0.6.0 (2020-11-16) +### Features Added +* Remove `RedirectURL` parameter from auth code flow to align with the MSAL implementation which relies on the native client redirect URL. + + +## 0.5.0 (2020-10-30) +### Features Added +* Flattening credential options + + +## 0.4.3 (2020-10-21) +### Features Added +* Adding Azure Arc support in `ManagedIdentityCredential` + + +## 0.4.2 (2020-10-16) +### Features Added +* Typo fixes + + +## 0.4.1 (2020-10-16) +### Features Added +* Ensure authority hosts are only HTTPs + + +## 0.4.0 (2020-10-16) +### Features Added +* Adding options structs for credentials + + +## 0.3.0 (2020-10-09) +### Features Added +* Update `DeviceCodeCredential` callback + + +## 0.2.2 (2020-10-09) +### Features Added +* Add `AuthorizationCodeCredential` + + +## 0.2.1 (2020-10-06) +### Features Added +* Add `InteractiveBrowserCredential` + + +## 0.2.0 (2020-09-11) +### Features Added +* Refactor `azidentity` on top of `azcore` refactor +* Updated policies to conform to `policy.Policy` interface changes. +* Updated non-retriable errors to conform to `azcore.NonRetriableError`. +* Fixed calls to `Request.SetBody()` to include content type. +* Switched endpoints to string types and removed extra parsing code. 
+ + +## 0.1.1 (2020-09-02) +### Features Added +* Add `AzureCLICredential` to `DefaultAzureCredential` chain + + +## 0.1.0 (2020-07-23) +### Features Added +* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt new file mode 100644 index 000000000..48ea6616b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md new file mode 100644 index 000000000..4ac53eb7b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -0,0 +1,307 @@ +# Migrating from autorest/adal to azidentity + +`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. + +This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. + +## Table of contents + +- [Acquire a token](#acquire-a-token) +- [Client certificate authentication](#client-certificate-authentication) +- [Client secret authentication](#client-secret-authentication) +- [Configuration](#configuration) +- [Device code authentication](#device-code-authentication) +- [Managed identity](#managed-identity) +- [Use azidentity credentials with older packages](#use-azidentity-credentials-with-older-packages) + +## Configuration + +### `autorest/adal` + +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.chinacloudapi.cn", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.chinacloudapi.cn/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +``` + +### `azidentity` + +A credential instance can acquire tokens for any audience. The audience for each token is determined by the client requesting it. 
Credentials require endpoint configuration only for sovereign or private clouds. The `azcore/cloud` package has predefined configuration for sovereign clouds such as Azure China: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +clientOpts := azcore.ClientOptions{Cloud: cloud.AzureChina} + +cred, err := azidentity.NewClientSecretCredential( + tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{ClientOptions: clientOpts}, +) +handle(err) +``` + +## Client secret authentication + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.azure.com/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Client certificate authentication + +### `autorest/adal` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +handle(err) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, clientID, certificate, rsaPrivateKey, "https://management.azure.com/", +) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certs, key, err := azidentity.ParseCertificates(certData, nil) +handle(err) + +cred, err = azidentity.NewClientCertificateCredential(tenantID, clientID, certs, key, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Managed identity + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewManagedIdentityCredential(nil) +handle(err) + +client, err := 
armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +### User-assigned identities + +`autorest/adal`: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +opts := &adal.ManagedIdentityOptions{ClientID: "..."} +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/") +handle(err) +``` + +`azidentity`: + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + +opts := azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")} +cred, err := azidentity.NewManagedIdentityCredential(&opts) +handle(err) +``` + +## Device code authentication + +### `autorest/adal` + +```go +import ( + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthClient := &http.Client{} +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +resource := "https://management.azure.com/" +deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthCfg, clientID, resource) +handle(err) + +// display instructions, wait for the user to authenticate +fmt.Println(*deviceCode.Message) +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthCfg, clientID, resource, *token) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewDeviceCodeCredential(nil) +handle(err) + +client, err := armsubscriptions.NewSubscriptionsClient(cred, nil) +handle(err) +``` + +`azidentity.DeviceCodeCredential` will guide a user through authentication, printing instructions to the console by default. The user prompt is customizable. For more information, see the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential). + +## Acquire a token + +### `autorest/adal` + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://vault.azure.net", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) + +err = spt.Refresh() +if err == nil { + token := spt.Token +} +``` + +### `azidentity` + +In ordinary usage, application code doesn't need to request tokens from credentials directly. Azure SDK clients handle token acquisition and refreshing internally. However, applications may call `GetToken()` to do so. All credential types have this method. + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +tk, err := cred.GetToken( + context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}}, +) +if err == nil { + token := tk.Token +} +``` + +Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. 
For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent). + +## Use azidentity credentials with older packages + +The [azidext module](https://pkg.go.dev/github.com/jongio/azidext/go/azidext) provides an adapter for `azidentity` credential types. The adapter enables using the credential types with older Azure SDK clients. For example: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/jongio/azidext/go/azidext" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"}) +``` + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FMIGRATION.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md new file mode 100644 index 000000000..da0baa9ad --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -0,0 +1,243 @@ +# Azure Identity Client Module for Go + +The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) +| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/) +| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) + +# Getting started + +## Install the module + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Install the Azure Identity module: + +```sh +go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +## Prerequisites + +- an [Azure subscription](https://azure.microsoft.com/free/) +- Go 1.18 + +### Authenticating during local development + +When debugging and executing code locally, developers typically use their own accounts to authenticate calls to Azure services. The `azidentity` module supports authenticating through developer tools to simplify local development. + +#### Authenticating via the Azure CLI + +`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user +signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. + +When no default browser is available, `az login` will use the device code +authentication flow. This can also be selected manually by running `az login --use-device-code`. + +## Key concepts + +### Credentials + +A credential is a type which contains or can obtain the data needed for a +service client to authenticate requests. Service clients across the Azure SDK +accept a credential instance when they are constructed, and use that credential +to authenticate requests. + +The `azidentity` module focuses on OAuth authentication with Azure Active +Directory (AAD). 
It offers a variety of credential types capable of acquiring +an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. + +### DefaultAzureCredential + +`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: + +![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) + +1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. +1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. +1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. +1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. + +> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. + +## Managed Identity + +`DefaultAzureCredential` and `ManagedIdentityCredential` support +[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) +in any hosting environment which supports managed identities, such as (this list is not exhaustive): +* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) +* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) +* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization) +* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity) +* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity) +* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token) + +## Examples + +- [Authenticate with DefaultAzureCredential](#authenticate-with-defaultazurecredential "Authenticate with DefaultAzureCredential") +- [Define a custom authentication flow with ChainedTokenCredential](#define-a-custom-authentication-flow-with-chainedtokencredential "Define a custom authentication flow with ChainedTokenCredential") +- [Specify a user-assigned managed identity for DefaultAzureCredential](#specify-a-user-assigned-managed-identity-for-defaultazurecredential) + +### Authenticate with DefaultAzureCredential + +This example demonstrates authenticating a client from the `armresources` module with `DefaultAzureCredential`. 
+ +```go +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", cred, nil) +``` + +### Specify a user-assigned managed identity for DefaultAzureCredential + +To configure `DefaultAzureCredential` to authenticate a user-assigned managed identity, set the environment variable `AZURE_CLIENT_ID` to the identity's client ID. + +### Define a custom authentication flow with `ChainedTokenCredential` + +`DefaultAzureCredential` is generally the quickest way to get started developing apps for Azure. For more advanced scenarios, `ChainedTokenCredential` links multiple credential instances to be tried sequentially when authenticating. It will try each chained credential in turn until one provides a token or fails to authenticate due to an error. + +The following example demonstrates creating a credential, which will attempt to authenticate using managed identity. It will fall back to authenticating via the Azure CLI when a managed identity is unavailable. + +```go +managed, err := azidentity.NewManagedIdentityCredential(nil) +if err != nil { + // handle error +} +azCLI, err := azidentity.NewAzureCLICredential(nil) +if err != nil { + // handle error +} +chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, azCLI}, nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) +``` + +## Credential Types + +### Authenticating Azure Hosted Applications + +|Credential|Usage +|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables +|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource +|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes + +### Authenticating Service Principals + +|Credential|Usage +|-|- +|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion +|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate +|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret + +### Authenticating Users + +|Credential|Usage +|-|- +|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser +|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI 
+|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password + +### Authenticating via Development Tools + +|Credential|Usage +|-|- +|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI + +## Environment Variables + +`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables: + +#### Service principal with secret + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_SECRET`|one of the application's client secrets + +#### Service principal with certificate + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key +|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any + +#### Username and password + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_USERNAME`|a username (usually an email address) +|`AZURE_PASSWORD`|that user's password + +Configuration is attempted in the above order. For example, if values for a +client secret and certificate are both present, the client secret will be used. + +## Troubleshooting + +### Error Handling + +Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). + +For more details on handling specific Azure Active Directory errors please refer to the +Azure Active Directory +[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes). + +### Logging + +This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. Use the `azcore/log` package to control log event output or to enable logs for `azidentity` only. For example: +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +Credentials log basic information only, such as `GetToken` success or failure and errors. These log entries don't contain authentication secrets but may contain sensitive information. + +## Next steps + +Client and management modules listed on the [Azure SDK releases page](https://azure.github.io/azure-sdk/releases/latest/go.html) support authenticating with `azidentity` credential types. You can learn more about using these libraries in their documentation, which is linked from the release page. + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues). + +## Contributing + +This project welcomes contributions and suggestions. 
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md new file mode 100644 index 000000000..7b7515eba --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -0,0 +1,205 @@ +# Troubleshoot Azure Identity authentication issues + +This troubleshooting guide covers failure investigation techniques, common errors for the credential types in the `azidentity` module, and mitigation steps to resolve these errors. + +## Table of contents + +- [Handle azidentity errors](#handle-azidentity-errors) + - [Permission issues](#permission-issues) +- [Find relevant information in errors](#find-relevant-information-in-errors) +- [Enable and configure logging](#enable-and-configure-logging) +- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) +- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) +- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) +- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues) +- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues) + - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity) + - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity) + - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity) +- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues) +- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues) +- [Get additional help](#get-additional-help) + +## Handle azidentity errors + +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. 
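+
+For example, a minimal sketch of detecting such a failure (assuming `err` holds the error returned by a service client call made with an `azidentity` credential):
+
+```go
+var authFailed *azidentity.AuthenticationFailedError
+if errors.As(err, &authFailed) {
+    // the credential couldn't acquire a token; the error text includes Azure AD's response, when one was received
+    fmt.Println(authFailed.Error())
+}
+```
+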
Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. + +### Permission issues + +Service client errors with a status code of 401 or 403 often indicate that authentication succeeded but the caller doesn't have permission to access the specified API. Check the service documentation to determine which RBAC roles are needed for the request, and ensure the authenticated user or service principal has the appropriate role assignments. + +## Find relevant information in errors + +Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message: + +``` +ClientSecretCredential authentication failed +POST https://login.microsoftonline.com/3c631bb7-a9f7-4343-a5ba-a615913/oauth2/v2.0/token +-------------------------------------------------------------------------------- +RESPONSE 401 Unauthorized +-------------------------------------------------------------------------------- +{ + "error": "invalid_client", + "error_description": "AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app '86be4c01-505b-45e9-bfc0-9b825fd84'.\r\nTrace ID: 03da4b8e-5ffe-48ca-9754-aff4276f0100\r\nCorrelation ID: 7b12f9bb-2eef-42e3-ad75-eee69ec9088d\r\nTimestamp: 2022-03-02 18:25:26Z", + "error_codes": [ + 7000215 + ], + "timestamp": "2022-03-02 18:25:26Z", + "trace_id": "03da4b8e-5ffe-48ca-9754-aff4276f0100", + "correlation_id": "7b12f9bb-2eef-42e3-ad75-eee69ec9088d", + "error_uri": "https://login.microsoftonline.com/error?code=7000215" +} +-------------------------------------------------------------------------------- +``` + +This error contains several pieces of information: + +- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. + +- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. + +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. + +### Enable and configure logging + +`azidentity` provides the same logging capabilities as the rest of the Azure SDK. The simplest way to see the logs to help debug authentication issues is to print credential logs to the console. +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +## Troubleshoot DefaultAzureCredential authentication issues + +| Error |Description| Mitigation | +|---|---|---| +|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
  • [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
  • For more information, consult the troubleshooting guide for the relevant underlying credential type:
    • [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
    • [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)
    • [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
    | +|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized) or 403 (Forbidden) status code|
    • [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.
    • If an unexpected credential is returning a token, check application configuration such as environment variables.
    • Ensure the correct role is assigned to the authenticated identity. For example, a service-specific role rather than the subscription Owner role.
    | + +## Troubleshoot EnvironmentCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)| + + +## Troubleshoot ClientSecretCredential authentication issues + +| Error Code | Issue | Mitigation | +|---|---|---| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + + +## Troubleshoot ClientCertificateCredential authentication issues + +| Error Code | Description | Mitigation | +|---|---|---| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + + +## Troubleshoot UsernamePasswordCredential authentication issues + +| Error Code | Issue | Mitigation | +|---|---|---| +|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.| + + +## Troubleshoot ManagedIdentityCredential authentication issues + +`ManagedIdentityCredential` is designed to work on a variety of Azure hosts support managed identity. Configuration and troubleshooting vary from host to host. The below table lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`. 
+ +|Host Environment| | | +|---|---|---| +|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| +|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| +|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)| +|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| +|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)|| + +### Azure Virtual Machine managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.

    If using a system-assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).| +|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error (502 or 504 status code).|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`.| +|No response received from the managed identity endpoint.|No response was received for the request to IMDS, or the request timed out.|

    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | +|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|
    • Refer to the error message for more details on specific failures.
    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | + +#### Verify IMDS is available on the VM + +If you have access to the VM, you can use `curl` to verify the managed identity endpoint is available. + +```sh +curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://management.core.windows.net&api-version=2018-02-01' -H "Metadata: true" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure App Service and Azure Functions managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|
    • Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).
    • Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
    | + +#### Verify the App Service managed identity endpoint is available + +If you can SSH into the App Service, you can verify managed identity is available in the environment. First ensure the environment variables `IDENTITY_ENDPOINT` and `IDENTITY_SECRET` are set. Then you can verify the managed identity endpoint is available using `curl`. + +```sh +curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-version=2019-08-01" -H "X-IDENTITY-HEADER: $IDENTITY_HEADER" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure Kubernetes Service managed identity + +#### Pod Identity + +| Error Message |Description| Mitigation | +|---|---|---| +|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). + + +## Troubleshoot AzureCliCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|
    • Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).
    • Validate the installation location is in the application's `PATH` environment variable.
    | +|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|
    • Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).
    • Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
    | + +#### Verify the Azure CLI can obtain tokens + +You can manually verify that the Azure CLI can authenticate and obtain tokens. First, use the `account` command to verify the logged in account. + +```azurecli +az account show +``` + +Once you've verified the Azure CLI is using the correct account, you can validate that it's able to obtain tokens for that account. + +```azurecli +az account get-access-token --output json --resource https://management.core.windows.net +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + + +## Troubleshoot `WorkloadIdentityCredential` authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
    • If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
    • If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions` + +## Get additional help + +Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json new file mode 100644 index 000000000..47e77f88e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/azidentity", + "Tag": "go/azidentity_6225ab0470" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go new file mode 100644 index 000000000..739ff49c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -0,0 +1,190 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/url" + "os" + "regexp" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const ( + azureAdditionallyAllowedTenants = "AZURE_ADDITIONALLY_ALLOWED_TENANTS" + azureAuthorityHost = "AZURE_AUTHORITY_HOST" + azureClientCertificatePassword = "AZURE_CLIENT_CERTIFICATE_PASSWORD" + azureClientCertificatePath = "AZURE_CLIENT_CERTIFICATE_PATH" + azureClientID = "AZURE_CLIENT_ID" + azureClientSecret = "AZURE_CLIENT_SECRET" + azureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE" + azurePassword = "AZURE_PASSWORD" + azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureTenantID = "AZURE_TENANT_ID" + azureUsername = "AZURE_USERNAME" + + organizationsTenantID = "organizations" + developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" + defaultSuffix = "/.default" + tenantIDValidationErr = "invalid tenantID. 
You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" +) + +var ( + // capability CP1 indicates the client application is capable of handling CAE claims challenges + cp1 = []string{"CP1"} + // CP1 is disabled until CAE support is added back + disableCP1 = true +) + +var getConfidentialClient = func(clientID, tenantID string, cred confidential.Credential, co *azcore.ClientOptions, additionalOpts ...confidential.Option) (confidentialClient, error) { + if !validTenantID(tenantID) { + return confidential.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return confidential.Client{}, err + } + authority := runtime.JoinPaths(authorityHost, tenantID) + o := []confidential.Option{ + confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), + confidential.WithHTTPClient(newPipelineAdapter(co)), + } + if !disableCP1 { + o = append(o, confidential.WithClientCapabilities(cp1)) + } + o = append(o, additionalOpts...) + if strings.ToLower(tenantID) == "adfs" { + o = append(o, confidential.WithInstanceDiscovery(false)) + } + return confidential.New(authority, clientID, cred, o...) +} + +var getPublicClient = func(clientID, tenantID string, co *azcore.ClientOptions, additionalOpts ...public.Option) (public.Client, error) { + if !validTenantID(tenantID) { + return public.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return public.Client{}, err + } + o := []public.Option{ + public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + public.WithHTTPClient(newPipelineAdapter(co)), + } + if !disableCP1 { + o = append(o, public.WithClientCapabilities(cp1)) + } + o = append(o, additionalOpts...) + if strings.ToLower(tenantID) == "adfs" { + o = append(o, public.WithInstanceDiscovery(false)) + } + return public.New(clientID, o...) +} + +// setAuthorityHost initializes the authority host for credentials. Precedence is: +// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user +// 2. value of AZURE_AUTHORITY_HOST +// 3. 
default: Azure Public Cloud +func setAuthorityHost(cc cloud.Configuration) (string, error) { + host := cc.ActiveDirectoryAuthorityHost + if host == "" { + if len(cc.Services) > 0 { + return "", errors.New("missing ActiveDirectoryAuthorityHost for specified cloud") + } + host = cloud.AzurePublic.ActiveDirectoryAuthorityHost + if envAuthorityHost := os.Getenv(azureAuthorityHost); envAuthorityHost != "" { + host = envAuthorityHost + } + } + u, err := url.Parse(host) + if err != nil { + return "", err + } + if u.Scheme != "https" { + return "", errors.New("cannot use an authority host without https") + } + return host, nil +} + +// validTenantID return true is it receives a valid tenantID, returns false otherwise +func validTenantID(tenantID string) bool { + match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) + if err != nil { + return false + } + return match +} + +func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { + pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) + return pipelineAdapter{pl: pl} +} + +type pipelineAdapter struct { + pl runtime.Pipeline +} + +func (p pipelineAdapter) CloseIdleConnections() { + // do nothing +} + +func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { + req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) + if err != nil { + return nil, err + } + if r.Body != nil && r.Body != http.NoBody { + // create a rewindable body from the existing body as required + var body io.ReadSeekCloser + if rsc, ok := r.Body.(io.ReadSeekCloser); ok { + body = rsc + } else { + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + body = streaming.NopCloser(bytes.NewReader(b)) + } + err = req.SetBody(body, r.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + resp, err := p.pl.Do(req) + if err != nil { + return nil, err + } + return resp, err +} + +// enables fakes for test scenarios +type confidentialClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireSilentOption) (confidential.AuthResult, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireByAuthCodeOption) (confidential.AuthResult, error) + AcquireTokenByCredential(ctx context.Context, scopes []string, options ...confidential.AcquireByCredentialOption) (confidential.AuthResult, error) + AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error) +} + +// enables fakes for test scenarios +type publicClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) + AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string, options ...public.AcquireByUsernamePasswordOption) (public.AuthResult, error) + AcquireTokenByDeviceCode(ctx context.Context, scopes []string, options ...public.AcquireByDeviceCodeOption) (public.DeviceCode, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireByAuthCodeOption) (public.AuthResult, error) + AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.AcquireInteractiveOption) (public.AuthResult, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go new file mode 100644 index 000000000..33ff13c09 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -0,0 +1,180 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const ( + credNameAzureCLI = "AzureCLICredential" + timeoutCLIRequest = 10 * time.Second +) + +// used by tests to fake invoking the CLI +type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) + +// AzureCLICredentialOptions contains optional parameters for AzureCLICredential. +type AzureCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + // TenantID identifies the tenant the credential should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. + TenantID string + + tokenProvider azureCLITokenProvider +} + +// init returns an instance of AzureCLICredentialOptions initialized with default values. +func (o *AzureCLICredentialOptions) init() { + if o.tokenProvider == nil { + o.tokenProvider = defaultTokenProvider() + } +} + +// AzureCLICredential authenticates as the identity logged in to the Azure CLI. +type AzureCLICredential struct { + s *syncer + tokenProvider azureCLITokenProvider +} + +// NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options. +func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredential, error) { + cp := AzureCLICredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c := AzureCLICredential{tokenProvider: cp.tokenProvider} + c.s = newSyncer(credNameAzureCLI, cp.TenantID, cp.AdditionallyAllowedTenants, c.requestToken, c.requestToken) + return &c, nil +} + +// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. +// This method is called automatically by Azure SDK clients. 
+func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + // CLI expects an AAD v1 resource, not a v2 scope + opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + return c.s.GetToken(ctx, opts) +} + +func (c *AzureCLICredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + b, err := c.tokenProvider(ctx, opts.Scopes[0], opts.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + at, err := c.createAccessToken(b) + if err != nil { + return azcore.AccessToken{}, err + } + return at, nil +} + +func defaultTokenProvider() func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + return func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) + } + + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) + defer cancel() + } + + commandLine := "az account get-access-token -o json --resource " + resource + if tenantID != "" { + commandLine += " --tenant " + tenantID + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { + msg = "Azure CLI not found on path" + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureCLI, msg) + } + + return output, nil + } +} + +func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + + // the Azure CLI's "expiresOn" is local time + exp, err := time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("Error parsing token expiration time %q: %v", t.ExpiresOn, err) + } + + converted := azcore.AccessToken{ + Token: t.AccessToken, + ExpiresOn: exp.UTC(), + } + return converted, nil +} + +var _ 
azcore.TokenCredential = (*AzureCLICredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go new file mode 100644 index 000000000..dc855edf7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -0,0 +1,138 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// ChainedTokenCredentialOptions contains optional parameters for ChainedTokenCredential. +type ChainedTokenCredentialOptions struct { + // RetrySources configures how the credential uses its sources. When true, the credential always attempts to + // authenticate through each source in turn, stopping when one succeeds. When false, the credential authenticates + // only through this first successful source--it never again tries the sources which failed. + RetrySources bool +} + +// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, +// it tries all the credentials until one authenticates, after which it always uses that credential. +type ChainedTokenCredential struct { + cond *sync.Cond + iterating bool + name string + retrySources bool + sources []azcore.TokenCredential + successfulCredential azcore.TokenCredential +} + +// NewChainedTokenCredential creates a ChainedTokenCredential. Pass nil for options to accept defaults. +func NewChainedTokenCredential(sources []azcore.TokenCredential, options *ChainedTokenCredentialOptions) (*ChainedTokenCredential, error) { + if len(sources) == 0 { + return nil, errors.New("sources must contain at least one TokenCredential") + } + for _, source := range sources { + if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil + return nil, errors.New("sources cannot contain nil") + } + } + cp := make([]azcore.TokenCredential, len(sources)) + copy(cp, sources) + if options == nil { + options = &ChainedTokenCredentialOptions{} + } + return &ChainedTokenCredential{ + cond: sync.NewCond(&sync.Mutex{}), + name: "ChainedTokenCredential", + retrySources: options.RetrySources, + sources: cp, + }, nil +} + +// GetToken calls GetToken on the chained credentials in turn, stopping when one returns a token. +// This method is called automatically by Azure SDK clients. 
+func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if !c.retrySources { + // ensure only one goroutine at a time iterates the sources and perhaps sets c.successfulCredential + c.cond.L.Lock() + for { + if c.successfulCredential != nil { + c.cond.L.Unlock() + return c.successfulCredential.GetToken(ctx, opts) + } + if !c.iterating { + c.iterating = true + // allow other goroutines to wait while this one iterates + c.cond.L.Unlock() + break + } + c.cond.Wait() + } + } + + var ( + err error + errs []error + successfulCredential azcore.TokenCredential + token azcore.AccessToken + unavailableErr *credentialUnavailableError + ) + for _, cred := range c.sources { + token, err = cred.GetToken(ctx, opts) + if err == nil { + log.Writef(EventAuthentication, "%s authenticated with %s", c.name, extractCredentialName(cred)) + successfulCredential = cred + break + } + errs = append(errs, err) + // continue to the next source iff this one returned credentialUnavailableError + if !errors.As(err, &unavailableErr) { + break + } + } + if c.iterating { + c.cond.L.Lock() + // this is nil when all credentials returned an error + c.successfulCredential = successfulCredential + c.iterating = false + c.cond.L.Unlock() + c.cond.Broadcast() + } + // err is the error returned by the last GetToken call. It will be nil when that call succeeds + if err != nil { + // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise + msg := createChainedErrorMessage(errs) + if errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(c.name, msg) + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, msg, res, err) + } + } + return token, err +} + +func createChainedErrorMessage(errs []error) string { + msg := "failed to acquire a token.\nAttempted credentials:" + for _, err := range errs { + msg += fmt.Sprintf("\n\t%s", err.Error()) + } + return msg +} + +func extractCredentialName(credential azcore.TokenCredential) string { + return strings.TrimPrefix(fmt.Sprintf("%T", credential), "*azidentity.") +} + +var _ azcore.TokenCredential = (*ChainedTokenCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml new file mode 100644 index 000000000..3b443e8ee --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -0,0 +1,47 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + RunLiveTests: true + ServiceDirectory: 'azidentity' + PreSteps: + - pwsh: | + [System.Convert]::FromBase64String($env:PFX_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/test.pfx -AsByteStream + Set-Content -Path $(Agent.TempDirectory)/test.pem -Value $env:PEM_CONTENTS + [System.Convert]::FromBase64String($env:SNI_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/testsni.pfx -AsByteStream + env: + PFX_CONTENTS: $(net-identity-spcert-pfx) + PEM_CONTENTS: $(net-identity-spcert-pem) + SNI_CONTENTS: $(net-identity-spcert-sni) + EnvVars: + AZURE_IDENTITY_TEST_TENANTID: $(net-identity-tenantid) + AZURE_IDENTITY_TEST_USERNAME: $(net-identity-username) + AZURE_IDENTITY_TEST_PASSWORD: $(net-identity-password) + IDENTITY_SP_TENANT_ID: $(net-identity-sp-tenantid) + IDENTITY_SP_CLIENT_ID: $(net-identity-sp-clientid) + IDENTITY_SP_CLIENT_SECRET: $(net-identity-sp-clientsecret) + IDENTITY_SP_CERT_PEM: $(Agent.TempDirectory)/test.pem + IDENTITY_SP_CERT_PFX: $(Agent.TempDirectory)/test.pfx + IDENTITY_SP_CERT_SNI: $(Agent.TempDirectory)/testsni.pfx diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go new file mode 100644 index 000000000..d9d22996c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -0,0 +1,83 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameAssertion = "ClientAssertionCredential" + +// ClientAssertionCredential authenticates an application with assertions provided by a callback function. +// This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for +// the most common assertion scenario, authenticating a service principal with a certificate. See +// [Azure AD documentation] for details of the assertion format. +// +// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +type ClientAssertionCredential struct { + client confidentialClient + s *syncer +} + +// ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. +type ClientAssertionCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. 
Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. +func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(context.Context) (string, error), options *ClientAssertionCredentialOptions) (*ClientAssertionCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion must be a function that returns assertions") + } + if options == nil { + options = &ClientAssertionCredentialOptions{} + } + cred := confidential.NewCredFromAssertionCallback( + func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }, + ) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + cac := ClientAssertionCredential{client: c} + cac.s = newSyncer(credNameAssertion, tenantID, options.AdditionallyAllowedTenants, cac.requestToken, cac.silentAuth) + return &cac, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientAssertionCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientAssertionCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go new file mode 100644 index 000000000..804eba899 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -0,0 +1,172 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "golang.org/x/crypto/pkcs12" +) + +const credNameCert = "ClientCertificateCredential" + +// ClientCertificateCredentialOptions contains optional parameters for ClientCertificateCredential. +type ClientCertificateCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. 
+ // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c + // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. + // Defaults to False. + SendCertificateChain bool +} + +// ClientCertificateCredential authenticates a service principal with a certificate. +type ClientCertificateCredential struct { + client confidentialClient + s *syncer +} + +// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. +func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) { + if len(certs) == 0 { + return nil, errors.New("at least one certificate is required") + } + if options == nil { + options = &ClientCertificateCredentialOptions{} + } + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + var o []confidential.Option + if options.SendCertificateChain { + o = append(o, confidential.WithX5C()) + } + o = append(o, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) + if err != nil { + return nil, err + } + cc := ClientCertificateCredential{client: c} + cc.s = newSyncer(credNameCert, tenantID, options.AdditionallyAllowedTenants, cc.requestToken, cc.silentAuth) + return &cc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientCertificateCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientCertificateCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. +// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format. 
+func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) { + var blocks []*pem.Block + var err error + if len(password) == 0 { + blocks, err = loadPEMCert(certData) + } + if len(blocks) == 0 || err != nil { + blocks, err = loadPKCS12Cert(certData, string(password)) + } + if err != nil { + return nil, nil, err + } + var certs []*x509.Certificate + var pk crypto.PrivateKey + for _, block := range blocks { + switch block.Type { + case "CERTIFICATE": + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, err + } + certs = append(certs, c) + case "PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + } + if err != nil { + return nil, nil, err + } + case "RSA PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, err + } + } + } + if len(certs) == 0 { + return nil, nil, errors.New("found no certificate") + } + if pk == nil { + return nil, nil, errors.New("found no private key") + } + return certs, pk, nil +} + +func loadPEMCert(certData []byte) ([]*pem.Block, error) { + blocks := []*pem.Block{} + for { + var block *pem.Block + block, certData = pem.Decode(certData) + if block == nil { + break + } + blocks = append(blocks, block) + } + if len(blocks) == 0 { + return nil, errors.New("didn't find any PEM blocks") + } + return blocks, nil +} + +func loadPKCS12Cert(certData []byte, password string) ([]*pem.Block, error) { + blocks, err := pkcs12.ToPEM(certData, password) + if err != nil { + return nil, err + } + if len(blocks) == 0 { + // not mentioning PKCS12 in this message because we end up here when certData is garbage + return nil, errors.New("didn't find any certificate content") + } + return blocks, err +} + +var _ azcore.TokenCredential = (*ClientCertificateCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go new file mode 100644 index 000000000..dda21f6b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameSecret = "ClientSecretCredential" + +// ClientSecretCredentialOptions contains optional parameters for ClientSecretCredential. +type ClientSecretCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. 
It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// ClientSecretCredential authenticates an application with a client secret. +type ClientSecretCredential struct { + client confidentialClient + s *syncer +} + +// NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. +func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) { + if options == nil { + options = &ClientSecretCredentialOptions{} + } + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + c, err := getConfidentialClient( + clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery), + ) + if err != nil { + return nil, err + } + csc := ClientSecretCredential{client: c} + csc.s = newSyncer(credNameSecret, tenantID, options.AdditionallyAllowedTenants, csc.requestToken, csc.silentAuth) + return &csc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientSecretCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientSecretCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go new file mode 100644 index 000000000..1e3efdc97 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -0,0 +1,209 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential. +// These options may not apply to all credentials in the chain. +type DefaultAzureCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add + // the wildcard value "*" to allow the credential to acquire tokens for any tenant. 
This value can also be + // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID identifies the tenant the Azure CLI should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI. + TenantID string +} + +// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. +// It combines credentials suitable for deployment with credentials suitable for local development. +// It attempts to authenticate with each of these credential types, in the following order, stopping +// when one provides a token: +// +// - [EnvironmentCredential] +// - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload +// identity webhook. Use [WorkloadIdentityCredential] directly when not using the webhook or needing +// more control over its configuration. +// - [ManagedIdentityCredential] +// - [AzureCLICredential] +// +// Consult the documentation for these credential types for more information on how they authenticate. +// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for +// every subsequent authentication. +type DefaultAzureCredential struct { + chain *ChainedTokenCredential +} + +// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults. 
+func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) { + var creds []azcore.TokenCredential + var errorMessages []string + + if options == nil { + options = &DefaultAzureCredentialOptions{} + } + additionalTenants := options.AdditionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + + envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + additionallyAllowedTenants: additionalTenants, + }) + if err == nil { + creds = append(creds, envCred) + } else { + errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err}) + } + + // workload identity requires values for AZURE_AUTHORITY_HOST, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE, AZURE_TENANT_ID + wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + }) + if err == nil { + creds = append(creds, wic) + } else { + errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err}) + } + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions} + if ID, ok := os.LookupEnv(azureClientID); ok { + o.ID = ClientID(ID) + } + miCred, err := NewManagedIdentityCredential(o) + if err == nil { + creds = append(creds, &timeoutWrapper{mic: miCred, timeout: time.Second}) + } else { + errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) + } + + cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID}) + if err == nil { + creds = append(creds, cliCred) + } else { + errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) + } + + err = defaultAzureCredentialConstructorErrorHandler(len(creds), errorMessages) + if err != nil { + return nil, err + } + + chain, err := NewChainedTokenCredential(creds, nil) + if err != nil { + return nil, err + } + chain.name = "DefaultAzureCredential" + return &DefaultAzureCredential{chain: chain}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.chain.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil) + +func defaultAzureCredentialConstructorErrorHandler(numberOfSuccessfulCredentials int, errorMessages []string) (err error) { + errorMessage := strings.Join(errorMessages, "\n\t") + + if numberOfSuccessfulCredentials == 0 { + return errors.New(errorMessage) + } + + if len(errorMessages) != 0 { + log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", errorMessage) + } + + return nil +} + +// defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed. +// Its GetToken method always returns a credentialUnavailableError having the same message as +// the error that prevented constructing the credential. This ensures the message is present +// in the error returned by ChainedTokenCredential.GetToken() +type defaultCredentialErrorReporter struct { + credType string + err error +} + +func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if _, ok := d.err.(*credentialUnavailableError); ok { + return azcore.AccessToken{}, d.err + } + return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error()) +} + +var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil) + +// timeoutWrapper prevents a potentially very long timeout when managed identity isn't available +type timeoutWrapper struct { + mic *ManagedIdentityCredential + // timeout applies to all auth attempts until one doesn't time out + timeout time.Duration +} + +// GetToken wraps DefaultAzureCredential's initial managed identity auth attempt with a short timeout +// because managed identity may not be available and connecting to IMDS can take several minutes to time out. +func (w *timeoutWrapper) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var tk azcore.AccessToken + var err error + // no need to synchronize around this value because it's written only within ChainedTokenCredential's critical section + if w.timeout > 0 { + c, cancel := context.WithTimeout(ctx, w.timeout) + defer cancel() + tk, err = w.mic.GetToken(c, opts) + if isAuthFailedDueToContext(err) { + err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out") + } else { + // some managed identity implementation is available, so don't apply the timeout to future calls + w.timeout = 0 + } + } else { + tk, err = w.mic.GetToken(ctx, opts) + } + return tk, err +} + +// unwraps nested AuthenticationFailedErrors to get the root error +func isAuthFailedDueToContext(err error) bool { + for { + var authFailedErr *AuthenticationFailedError + if !errors.As(err, &authFailedErr) { + break + } + err = authFailedErr.err + } + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go new file mode 100644 index 000000000..108e83c43 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -0,0 +1,136 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameDeviceCode = "DeviceCodeCredential" + +// DeviceCodeCredentialOptions contains optional parameters for DeviceCodeCredential. +type DeviceCodeCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant + // applications. + TenantID string + + // UserPrompt controls how the credential presents authentication instructions. The credential calls + // this function with authentication details when it receives a device code. By default, the credential + // prints these details to stdout. + UserPrompt func(context.Context, DeviceCodeMessage) error +} + +func (o *DeviceCodeCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } + if o.UserPrompt == nil { + o.UserPrompt = func(ctx context.Context, dc DeviceCodeMessage) error { + fmt.Println(dc.Message) + return nil + } + } +} + +// DeviceCodeMessage contains the information a user needs to complete authentication. +type DeviceCodeMessage struct { + // UserCode is the user code returned by the service. + UserCode string `json:"user_code"` + // VerificationURL is the URL at which the user must authenticate. + VerificationURL string `json:"verification_uri"` + // Message is user instruction from Azure Active Directory. + Message string `json:"message"` +} + +// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the +// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful +// for authenticating a user in an environment without a web browser, such as an SSH session. +// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// automatically opens a browser to the login page. +type DeviceCodeCredential struct { + account public.Account + client publicClient + s *syncer + prompt func(context.Context, DeviceCodeMessage) error +} + +// NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options. 
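+//
+// A usage sketch with a custom UserPrompt; the tenant ID and scope are assumed example values
+// (imports: context, fmt, azcore/policy):
+//
+//	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
+//		TenantID: "<tenant-id>",
+//		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
+//			// surface the instructions somewhere other than stdout, e.g. a UI or log
+//			fmt.Println(dc.Message)
+//			return nil
+//		},
+//	})
+//	if err != nil {
+//		// handle err
+//	}
+//	// GetToken starts the device code flow and blocks until the user authenticates
+//	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
+//		Scopes: []string{"https://graph.microsoft.com/.default"},
+//	})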
+func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeCredential, error) { + cp := DeviceCodeCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient( + cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery), + ) + if err != nil { + return nil, err + } + cred := DeviceCodeCredential{client: c, prompt: cp.UserPrompt} + cred.s = newSyncer(credNameDeviceCode, cp.TenantID, cp.AdditionallyAllowedTenants, cred.requestToken, cred.silentAuth) + return &cred, nil +} + +// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. +// This method is called automatically by Azure SDK clients. +func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *DeviceCodeCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + dc, err := c.client.AcquireTokenByDeviceCode(ctx, opts.Scopes, public.WithTenantID(opts.TenantID)) + if err != nil { + return azcore.AccessToken{}, err + } + err = c.prompt(ctx, DeviceCodeMessage{ + Message: dc.Result.Message, + UserCode: dc.Result.UserCode, + VerificationURL: dc.Result.VerificationURL, + }) + if err != nil { + return azcore.AccessToken{}, err + } + ar, err := dc.AuthenticationResult(ctx) + if err != nil { + return azcore.AccessToken{}, err + } + c.account = ar.Account + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *DeviceCodeCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go new file mode 100644 index 000000000..7ecd928e0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN" + +// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential +type EnvironmentCredentialOptions struct { + azcore.ClientOptions + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. 
+ DisableInstanceDiscovery bool + // additionallyAllowedTenants is used only by NewDefaultAzureCredential() to enable that constructor's explicit + // option to override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS. Applications using EnvironmentCredential + // directly should set that variable instead. This field should remain unexported to preserve this credential's + // unambiguous "all configuration from environment variables" design. + additionallyAllowedTenants []string +} + +// EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending +// on environment variable configuration. It reads configuration from these variables, in the following order: +// +// # Service principal with client secret +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_SECRET: one of the service principal's client secrets +// +// # Service principal with certificate +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the private key. +// +// AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file. +// +// # User with username and password +// +// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". +// +// AZURE_CLIENT_ID: client ID of the application the user will authenticate to +// +// AZURE_USERNAME: a username (usually an email address) +// +// AZURE_PASSWORD: the user's password +// +// # Configuration for multitenant applications +// +// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants +// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set +// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant. +type EnvironmentCredential struct { + cred azcore.TokenCredential +} + +// NewEnvironmentCredential creates an EnvironmentCredential. Pass nil to accept default options. 
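+//
+// A configuration sketch for a service principal with a client secret; the values are
+// placeholders and must be set in the environment before the credential is constructed:
+//
+//	// AZURE_TENANT_ID=<tenant ID>
+//	// AZURE_CLIENT_ID=<application (client) ID>
+//	// AZURE_CLIENT_SECRET=<client secret>
+//	cred, err := azidentity.NewEnvironmentCredential(nil)
+//	if err != nil {
+//		// the environment variable configuration is missing or incomplete
+//	}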
+func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*EnvironmentCredential, error) { + if options == nil { + options = &EnvironmentCredentialOptions{} + } + tenantID := os.Getenv(azureTenantID) + if tenantID == "" { + return nil, errors.New("missing environment variable AZURE_TENANT_ID") + } + clientID := os.Getenv(azureClientID) + if clientID == "" { + return nil, errors.New("missing environment variable " + azureClientID) + } + // tenants set by NewDefaultAzureCredential() override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS + additionalTenants := options.additionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + if clientSecret := os.Getenv(azureClientSecret); clientSecret != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential") + o := &ClientSecretCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if certPath := os.Getenv(azureClientCertificatePath); certPath != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential") + certData, err := os.ReadFile(certPath) + if err != nil { + return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err) + } + var password []byte + if v := os.Getenv(azureClientCertificatePassword); v != "" { + password = []byte(v) + } + certs, key, err := ParseCertificates(certData, password) + if err != nil { + return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err) + } + o := &ClientCertificateCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + if v, ok := os.LookupEnv(envVarSendCertChain); ok { + o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true" + } + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if username := os.Getenv(azureUsername); username != "" { + if password := os.Getenv(azurePassword); password != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential") + o := &UsernamePasswordCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + return nil, errors.New("no value for AZURE_PASSWORD") + } + return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.cred.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*EnvironmentCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go new file mode 100644 index 000000000..86d8976a4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" +) + +// getResponseFromError retrieves the response carried by +// an AuthenticationFailedError or MSAL CallErr, if any +func getResponseFromError(err error) *http.Response { + var a *AuthenticationFailedError + var c msal.CallErr + var res *http.Response + if errors.As(err, &c) { + res = c.Resp + } else if errors.As(err, &a) { + res = a.RawResponse + } + return res +} + +// AuthenticationFailedError indicates an authentication request has failed. +type AuthenticationFailedError struct { + // RawResponse is the HTTP response motivating the error, if available. + RawResponse *http.Response + + credType string + message string + err error +} + +func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. 
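+//
+// Callers generally match on the error type rather than parse this message. A sketch, assuming
+// err came from a GetToken call (imports: errors, fmt):
+//
+//	var authErr *azidentity.AuthenticationFailedError
+//	if errors.As(err, &authErr) && authErr.RawResponse != nil {
+//		fmt.Println("authentication failed:", authErr.RawResponse.Status)
+//	}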
+func (e *AuthenticationFailedError) Error() string { + if e.RawResponse == nil { + return e.credType + ": " + e.message + } + msg := &bytes.Buffer{} + fmt.Fprintf(msg, e.credType+" authentication failed\n") + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := io.ReadAll(e.RawResponse.Body) + e.RawResponse.Body.Close() + if err != nil { + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + e.RawResponse.Body = io.NopCloser(bytes.NewReader(body)) + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + } else { + fmt.Fprint(msg, "Response contained no body") + } + fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") + var anchor string + switch e.credType { + case credNameAzureCLI: + anchor = "azure-cli" + case credNameCert: + anchor = "client-cert" + case credNameSecret: + anchor = "client-secret" + case credNameManagedIdentity: + anchor = "managed-id" + case credNameUserPassword: + anchor = "username-password" + case credNameWorkloadIdentity: + anchor = "workload" + } + if anchor != "" { + fmt.Fprintf(msg, "To troubleshoot, visit https://aka.ms/azsdk/go/identity/troubleshoot#%s", anchor) + } + return msg.String() +} + +// NonRetriable indicates the request which provoked this error shouldn't be retried. +func (*AuthenticationFailedError) NonRetriable() { + // marker method +} + +var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) + +// credentialUnavailableError indicates a credential can't attempt authentication because it lacks required +// data or state +type credentialUnavailableError struct { + message string +} + +// newCredentialUnavailableError is an internal helper that ensures consistent error message formatting +func newCredentialUnavailableError(credType, message string) error { + msg := fmt.Sprintf("%s: %s", credType, message) + return &credentialUnavailableError{msg} +} + +// NewCredentialUnavailableError constructs an error indicating a credential can't attempt authentication +// because it lacks required data or state. When [ChainedTokenCredential] receives this error it will try +// its next credential, if any. +func NewCredentialUnavailableError(message string) error { + return &credentialUnavailableError{message} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. +func (e *credentialUnavailableError) Error() string { + return e.message +} + +// NonRetriable is a marker method indicating this error should not be retried. It has no implementation. 
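+//
+// A credential intended for use in a [ChainedTokenCredential] can return
+// [NewCredentialUnavailableError] so the chain proceeds to its next credential. A sketch in
+// which cachedCredential and its lookup method are hypothetical:
+//
+//	func (c *cachedCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+//		tk, ok := c.lookup(opts.Scopes)
+//		if !ok {
+//			return azcore.AccessToken{}, azidentity.NewCredentialUnavailableError("cachedCredential has no token for these scopes")
+//		}
+//		return tk, nil
+//	}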
+func (e *credentialUnavailableError) NonRetriable() {} + +var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go new file mode 100644 index 000000000..4868d22c3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -0,0 +1,106 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameBrowser = "InteractiveBrowserCredential" + +// InteractiveBrowserCredentialOptions contains optional parameters for InteractiveBrowserCredential. +type InteractiveBrowserCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. + LoginHint string + // RedirectURL is the URL Azure Active Directory will redirect to with the access token. This is required + // only when setting ClientID, and must match a redirect URI in the application's registration. + // Applications which have registered "http://localhost" as a redirect URI need not set this option. + RedirectURL string + + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. + TenantID string +} + +func (o *InteractiveBrowserCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } +} + +// InteractiveBrowserCredential opens a browser to interactively authenticate a user. +type InteractiveBrowserCredential struct { + account public.Account + client publicClient + options InteractiveBrowserCredentialOptions + s *syncer +} + +// NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options. 
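+//
+// A minimal sketch; nil options select the development application client ID and the
+// "organizations" tenant, and the scope below is only an example (imports: context, azcore/policy):
+//
+//	cred, err := azidentity.NewInteractiveBrowserCredential(nil)
+//	if err != nil {
+//		// handle err
+//	}
+//	// GetToken opens a browser and waits for the user to authenticate
+//	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
+//		Scopes: []string{"https://graph.microsoft.com/.default"},
+//	})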
+func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOptions) (*InteractiveBrowserCredential, error) { + cp := InteractiveBrowserCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + ibc := InteractiveBrowserCredential{client: c, options: cp} + ibc.s = newSyncer(credNameBrowser, cp.TenantID, cp.AdditionallyAllowedTenants, ibc.requestToken, ibc.silentAuth) + return &ibc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *InteractiveBrowserCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenInteractive(ctx, opts.Scopes, + public.WithLoginHint(c.options.LoginHint), + public.WithRedirectURI(c.options.RedirectURL), + public.WithTenantID(opts.TenantID), + ) + if err == nil { + c.account = ar.Account + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *InteractiveBrowserCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go new file mode 100644 index 000000000..1aa1e0fc7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + +// EventAuthentication entries contain information about authentication. +// This includes information like the names of environment variables +// used when obtaining credentials and the type of credential used. +const EventAuthentication log.Event = "Authentication" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go new file mode 100644 index 000000000..d7b4a32a5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -0,0 +1,388 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const ( + arcIMDSEndpoint = "IMDS_ENDPOINT" + identityEndpoint = "IDENTITY_ENDPOINT" + identityHeader = "IDENTITY_HEADER" + identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" + headerMetadata = "Metadata" + imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + msiEndpoint = "MSI_ENDPOINT" + imdsAPIVersion = "2018-02-01" + azureArcAPIVersion = "2019-08-15" + serviceFabricAPIVersion = "2019-07-01-preview" + + qpClientID = "client_id" + qpResID = "mi_res_id" +) + +type msiType int + +const ( + msiTypeAppService msiType = iota + msiTypeAzureArc + msiTypeCloudShell + msiTypeIMDS + msiTypeServiceFabric +) + +// managedIdentityClient provides the base for authenticating in managed identity environments +// This type includes an runtime.Pipeline and TokenCredentialOptions. +type managedIdentityClient struct { + pipeline runtime.Pipeline + msiType msiType + endpoint string + id ManagedIDKind +} + +type wrappedNumber json.Number + +func (n *wrappedNumber) UnmarshalJSON(b []byte) error { + c := string(b) + if c == "\"\"" { + return nil + } + return json.Unmarshal(b, (*json.Number)(n)) +} + +// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS +func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = 5 + } + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 1 * time.Minute + } + if o.RetryDelay == 0 { + o.RetryDelay = 2 * time.Second + } + if o.StatusCodes == nil { + o.StatusCodes = []int{ + // IMDS docs recommend retrying 404, 429 and all 5xx + // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + http.StatusNotFound, // 404 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusNotImplemented, // 501 + http.StatusBadGateway, // 502 + http.StatusGatewayTimeout, // 504 + http.StatusHTTPVersionNotSupported, // 505 + http.StatusVariantAlsoNegotiates, // 506 + http.StatusInsufficientStorage, // 507 + http.StatusLoopDetected, // 508 + http.StatusNotExtended, // 510 + http.StatusNetworkAuthenticationRequired, // 511 + } + } + if o.TryTimeout == 0 { + o.TryTimeout = 1 * time.Minute + } +} + +// newManagedIdentityClient creates a new instance of the ManagedIdentityClient with the ManagedIdentityCredentialOptions +// that are passed into it along with a default pipeline. 
+// options: ManagedIdentityCredentialOptions configure policies for the pipeline and the authority host that +// will be used to retrieve tokens and authenticate +func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*managedIdentityClient, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + cp := options.ClientOptions + c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS} + env := "IMDS" + if endpoint, ok := os.LookupEnv(identityEndpoint); ok { + if _, ok := os.LookupEnv(identityHeader); ok { + if _, ok := os.LookupEnv(identityServerThumbprint); ok { + env = "Service Fabric" + c.endpoint = endpoint + c.msiType = msiTypeServiceFabric + } else { + env = "App Service" + c.endpoint = endpoint + c.msiType = msiTypeAppService + } + } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + env = "Azure Arc" + c.endpoint = endpoint + c.msiType = msiTypeAzureArc + } + } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { + env = "Cloud Shell" + c.endpoint = endpoint + c.msiType = msiTypeCloudShell + } else { + setIMDSRetryOptionDefaults(&cp.Retry) + } + c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + if log.Should(EventAuthentication) { + log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) + } + + return &c, nil +} + +// provideToken acquires a token for MSAL's confidential.Client, which caches the token +func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { + result := confidential.TokenProviderResult{} + tk, err := c.authenticate(ctx, c.id, params.Scopes) + if err == nil { + result.AccessToken = tk.Token + result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) + } + return result, err +} + +// authenticate acquires an access token +func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + msg, err := c.createAuthRequest(ctx, id, scopes) + if err != nil { + return azcore.AccessToken{}, err + } + + resp, err := c.pipeline.Do(msg) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) + } + + if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return c.createAccessToken(resp) + } + + if c.msiType == msiTypeIMDS && resp.StatusCode == 400 { + if id != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "no default identity is assigned to this resource") + } + + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) +} + +func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { + value := struct { + // these are the only fields that we use + Token string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid + ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string + }{} + if err := runtime.UnmarshalAsJSON(res, 
&value); err != nil { + return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) + } + if value.ExpiresIn != "" { + expiresIn, err := json.Number(value.ExpiresIn).Int64() + if err != nil { + return azcore.AccessToken{}, err + } + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil + } + switch v := value.ExpiresOn.(type) { + case float64: + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil + case string: + if expiresOn, err := strconv.Atoi(v); err == nil { + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil + } + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil) + default: + msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil) + } +} + +func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + switch c.msiType { + case msiTypeIMDS: + return c.createIMDSAuthRequest(ctx, id, scopes) + case msiTypeAppService: + return c.createAppServiceAuthRequest(ctx, id, scopes) + case msiTypeAzureArc: + // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service + key, err := c.getAzureArcSecretKey(ctx, scopes) + if err != nil { + msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) + return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) + } + return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeServiceFabric: + return c.createServiceFabricAuthRequest(ctx, id, scopes) + case msiTypeCloudShell: + return c.createCloudShellAuthRequest(ctx, id, scopes) + default: + return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") + } +} + +func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", imdsAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) + q := request.Raw().URL.Query() + q.Add("api-version", "2019-08-01") + q.Add("resource", scopes[0]) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) 
+ if err != nil { + return nil, err + } + q := request.Raw().URL.Query() + request.Raw().Header.Set("Accept", "application/json") + request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) + q.Add("api-version", serviceFabricAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { + // create the request to retreive the secret key challenge provided by the HIMDS service + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return "", err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + request.Raw().URL.RawQuery = q.Encode() + // send the initial request to get the short-lived secret key + response, err := c.pipeline.Do(request) + if err != nil { + return "", err + } + // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location + // of the secret key file. Any other status code indicates an error in the request. + if response.StatusCode != 401 { + msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) + return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil) + } + header := response.Header.Get("WWW-Authenticate") + if len(header) == 0 { + return "", errors.New("did not receive a value from WWW-Authenticate header") + } + // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key + pos := strings.LastIndex(header, "=") + if pos == -1 { + return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) + } + key, err := os.ReadFile(header[pos+1:]) + if err != nil { + return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) + } + return string(key), nil +} + +func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + data := url.Values{} + data.Set("resource", strings.Join(scopes, " ")) + dataEncoded := data.Encode() + body := 
streaming.NopCloser(strings.NewReader(dataEncoded)) + if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { + return nil, err + } + if id != nil { + log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") + q := request.Raw().URL.Query() + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + return request, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go new file mode 100644 index 000000000..c6710ae52 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -0,0 +1,127 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameManagedIdentity = "ManagedIdentityCredential" + +type managedIdentityIDKind int + +const ( + miClientID managedIdentityIDKind = 0 + miResourceID managedIdentityIDKind = 1 +) + +// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID +type ManagedIDKind interface { + fmt.Stringer + idKind() managedIdentityIDKind +} + +// ClientID is the client ID of a user-assigned managed identity. +type ClientID string + +func (ClientID) idKind() managedIdentityIDKind { + return miClientID +} + +// String returns the string value of the ID. +func (c ClientID) String() string { + return string(c) +} + +// ResourceID is the resource ID of a user-assigned managed identity. +type ResourceID string + +func (ResourceID) idKind() managedIdentityIDKind { + return miResourceID +} + +// String returns the string value of the ID. +func (r ResourceID) String() string { + return string(r) +} + +// ManagedIdentityCredentialOptions contains optional parameters for ManagedIdentityCredential. +type ManagedIdentityCredentialOptions struct { + azcore.ClientOptions + + // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity + // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that + // some platforms don't accept resource IDs. + ID ManagedIDKind +} + +// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a +// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: +// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +type ManagedIdentityCredential struct { + client confidentialClient + mic *managedIdentityClient + s *syncer +} + +// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. 
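+//
+// A sketch selecting a user-assigned identity by client ID; the ID and scope are placeholders
+// (imports: context, azcore/policy):
+//
+//	cred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
+//		ID: azidentity.ClientID("<client ID of the user-assigned identity>"),
+//	})
+//	if err != nil {
+//		// handle err
+//	}
+//	// exactly one scope is required; the "/.default" suffix is trimmed before the request
+//	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
+//		Scopes: []string{"https://management.azure.com/.default"},
+//	})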
+func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*ManagedIdentityCredential, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + mic, err := newManagedIdentityClient(options) + if err != nil { + return nil, err + } + cred := confidential.NewCredFromTokenProvider(mic.provideToken) + + // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. + // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. + clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" + if options.ID != nil { + clientID = options.ID.String() + } + // similarly, it's okay to give MSAL an incorrect authority URL because that URL won't be used + c, err := confidential.New("https://login.microsoftonline.com/common", clientID, cred) + if err != nil { + return nil, err + } + m := ManagedIdentityCredential{client: c, mic: mic} + m.s = newSyncer(credNameManagedIdentity, "", nil, m.requestToken, m.silentAuth) + return &m, nil +} + +// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. +func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + err := errors.New(credNameManagedIdentity + ": GetToken() requires exactly one scope") + return azcore.AccessToken{}, err + } + // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + return c.s.GetToken(ctx, opts) +} + +func (c *ManagedIdentityCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ManagedIdentityCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go new file mode 100644 index 000000000..3e173f47d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -0,0 +1,99 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameOBO = "OnBehalfOfCredential" + +// OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by +// middle-tier services that authorize requests to other services with a delegated user identity. Because this +// is not an interactive authentication flow, an application using it must have admin consent for any delegated +// permissions before requesting tokens for them. 
See [Azure Active Directory documentation] for more details. +// +// [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +type OnBehalfOfCredential struct { + assertion string + client confidentialClient + s *syncer +} + +// OnBehalfOfCredentialOptions contains optional parameters for OnBehalfOfCredential +type OnBehalfOfCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. + // This setting controls whether the credential sends the public certificate chain in the x5c header of each + // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. + SendCertificateChain bool +} + +// NewOnBehalfOfCredentialWithCertificate constructs an OnBehalfOfCredential that authenticates with a certificate. +// See [ParseCertificates] for help loading a certificate. +func NewOnBehalfOfCredentialWithCertificate(tenantID, clientID, userAssertion string, certs []*x509.Certificate, key crypto.PrivateKey, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +// NewOnBehalfOfCredentialWithSecret constructs an OnBehalfOfCredential that authenticates with a client secret. +func NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret string, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred confidential.Credential, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + if options == nil { + options = &OnBehalfOfCredentialOptions{} + } + opts := []confidential.Option{} + if options.SendCertificateChain { + opts = append(opts, confidential.WithX5C()) + } + opts = append(opts, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, opts...) + if err != nil { + return nil, err + } + obo := OnBehalfOfCredential{assertion: userAssertion, client: c} + obo.s = newSyncer(credNameOBO, tenantID, options.AdditionallyAllowedTenants, obo.requestToken, obo.requestToken) + return &obo, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
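+//
+// A sketch of the on-behalf-of flow with a client secret; all identifiers are placeholders and
+// userAssertion is the access token the middle-tier service received from its caller:
+//
+//	cred, err := azidentity.NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret, nil)
+//	if err != nil {
+//		// handle err
+//	}
+//	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
+//		Scopes: []string{"https://graph.microsoft.com/.default"},
+//	})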
+func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return o.s.GetToken(ctx, opts) +} + +func (o *OnBehalfOfCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := o.client.AcquireTokenOnBehalfOf(ctx, o.assertion, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go new file mode 100644 index 000000000..ae3855599 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go @@ -0,0 +1,130 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type authFn func(context.Context, policy.TokenRequestOptions) (azcore.AccessToken, error) + +// syncer synchronizes authentication calls so that goroutines can share a credential instance +type syncer struct { + addlTenants []string + authing bool + cond *sync.Cond + reqToken, silent authFn + name, tenant string +} + +func newSyncer(name, tenant string, additionalTenants []string, reqToken, silentAuth authFn) *syncer { + return &syncer{ + addlTenants: resolveAdditionalTenants(additionalTenants), + cond: &sync.Cond{L: &sync.Mutex{}}, + name: name, + reqToken: reqToken, + silent: silentAuth, + tenant: tenant, + } +} + +// GetToken ensures that only one goroutine authenticates at a time +func (s *syncer) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var at azcore.AccessToken + var err error + if len(opts.Scopes) == 0 { + return at, errors.New(s.name + ".GetToken() requires at least one scope") + } + // we don't resolve the tenant for managed identities because they can acquire tokens only from their home tenants + if s.name != credNameManagedIdentity { + tenant, err := s.resolveTenant(opts.TenantID) + if err != nil { + return at, err + } + opts.TenantID = tenant + } + auth := false + s.cond.L.Lock() + defer s.cond.L.Unlock() + for { + at, err = s.silent(ctx, opts) + if err == nil { + // got a token + break + } + if !s.authing { + // this goroutine will request a token + s.authing, auth = true, true + break + } + // another goroutine is acquiring a token; wait for it to finish, then try silent auth again + s.cond.Wait() + } + if auth { + s.authing = false + at, err = s.reqToken(ctx, opts) + s.cond.Broadcast() + } + if err != nil { + // Return credentialUnavailableError directly because that type affects the behavior of credential chains. + // Otherwise, return AuthenticationFailedError. 
+ var unavailableErr *credentialUnavailableError + if !errors.As(err, &unavailableErr) { + res := getResponseFromError(err) + err = newAuthenticationFailedError(s.name, err.Error(), res, err) + } + } else if log.Should(EventAuthentication) { + scope := strings.Join(opts.Scopes, ", ") + msg := fmt.Sprintf(`%s.GetToken() acquired a token for scope "%s"\n`, s.name, scope) + log.Write(EventAuthentication, msg) + } + return at, err +} + +// resolveTenant returns the correct tenant for a token request given the credential's +// configuration, or an error when the specified tenant isn't allowed by that configuration +func (s *syncer) resolveTenant(requested string) (string, error) { + if requested == "" || requested == s.tenant { + return s.tenant, nil + } + if s.tenant == "adfs" { + return "", errors.New("ADFS doesn't support tenants") + } + if !validTenantID(requested) { + return "", errors.New(tenantIDValidationErr) + } + for _, t := range s.addlTenants { + if t == "*" || t == requested { + return requested, nil + } + } + return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, s.name, requested) +} + +// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard +func resolveAdditionalTenants(tenants []string) []string { + if len(tenants) == 0 { + return nil + } + for _, t := range tenants { + // a wildcard makes all other values redundant + if t == "*" { + return []string{"*"} + } + } + cp := make([]string, len(tenants)) + copy(cp, tenants) + return cp +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go new file mode 100644 index 000000000..8e652e33f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -0,0 +1,81 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameUserPassword = "UsernamePasswordCredential" + +// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential. +type UsernamePasswordCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// UsernamePasswordCredential authenticates a user with a password. 
Microsoft doesn't recommend this kind of authentication, +// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible +// with any form of multi-factor authentication, and the application must already have user or admin consent. +// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. +type UsernamePasswordCredential struct { + account public.Account + client publicClient + password, username string + s *syncer +} + +// NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user +// will authenticate to. Pass nil for options to accept defaults. +func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) { + if options == nil { + options = &UsernamePasswordCredentialOptions{} + } + c, err := getPublicClient(clientID, tenantID, &options.ClientOptions, public.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + upc := UsernamePasswordCredential{client: c, password: password, username: username} + upc.s = newSyncer(credNameUserPassword, tenantID, options.AdditionallyAllowedTenants, upc.requestToken, upc.silentAuth) + return &upc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *UsernamePasswordCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByUsernamePassword(ctx, opts.Scopes, c.username, c.password, public.WithTenantID(opts.TenantID)) + if err == nil { + c.account = ar.Account + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *UsernamePasswordCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go new file mode 100644 index 000000000..1a526b2e8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -0,0 +1,15 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +const ( + // UserAgent is the string to be used in the user agent string when making requests. + component = "azidentity" + + // Version is the semantic version (see http://semver.org) of this module. 
+ version = "v1.3.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go new file mode 100644 index 000000000..7bfb34367 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -0,0 +1,126 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const credNameWorkloadIdentity = "WorkloadIdentityCredential" + +// WorkloadIdentityCredential supports Azure workload identity on Kubernetes. +// See [Azure Kubernetes Service documentation] for more information. +// +// [Azure Kubernetes Service documentation]: https://learn.microsoft.com/azure/aks/workload-identity-overview +type WorkloadIdentityCredential struct { + assertion, file string + cred *ClientAssertionCredential + expires time.Time + mtx *sync.RWMutex +} + +// WorkloadIdentityCredentialOptions contains optional parameters for WorkloadIdentityCredential. +type WorkloadIdentityCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. + ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. + TenantID string + // TokenFilePath is the path a file containing the workload identity token. Defaults to the value of the + // environment variable AZURE_FEDERATED_TOKEN_FILE. + TokenFilePath string +} + +// NewWorkloadIdentityCredential constructs a WorkloadIdentityCredential. Service principal configuration is read +// from environment variables as set by the Azure workload identity webhook. Set options to override those values. +func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (*WorkloadIdentityCredential, error) { + if options == nil { + options = &WorkloadIdentityCredentialOptions{} + } + ok := false + clientID := options.ClientID + if clientID == "" { + if clientID, ok = os.LookupEnv(azureClientID); !ok { + return nil, errors.New("no client ID specified. Check pod configuration or set ClientID in the options") + } + } + file := options.TokenFilePath + if file == "" { + if file, ok = os.LookupEnv(azureFederatedTokenFile); !ok { + return nil, errors.New("no token file specified. 
Check pod configuration or set TokenFilePath in the options") + } + } + tenantID := options.TenantID + if tenantID == "" { + if tenantID, ok = os.LookupEnv(azureTenantID); !ok { + return nil, errors.New("no tenant ID specified. Check pod configuration or set TenantID in the options") + } + } + w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}} + caco := ClientAssertionCredentialOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientAssertionCredential(tenantID, clientID, w.getAssertion, &caco) + if err != nil { + return nil, err + } + // we want "WorkloadIdentityCredential" in log messages, not "ClientAssertionCredential" + cred.s.name = credNameWorkloadIdentity + w.cred = cred + return &w, nil +} + +// GetToken requests an access token from Azure Active Directory. Azure SDK clients call this method automatically. +func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return w.cred.GetToken(ctx, opts) +} + +// getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token. +// Kubernetes is responsible for updating the file as service account tokens expire. +func (w *WorkloadIdentityCredential) getAssertion(context.Context) (string, error) { + w.mtx.RLock() + if w.expires.Before(time.Now()) { + // ensure only one goroutine at a time updates the assertion + w.mtx.RUnlock() + w.mtx.Lock() + defer w.mtx.Unlock() + // double check because another goroutine may have acquired the write lock first and done the update + if now := time.Now(); w.expires.Before(now) { + content, err := os.ReadFile(w.file) + if err != nil { + return "", err + } + w.assertion = string(content) + // Kubernetes rotates service account tokens when they reach 80% of their total TTL. The shortest TTL + // is 1 hour. That implies the token we just read is valid for at least 12 minutes (20% of 1 hour), + // but we add some margin for safety. + w.expires = now.Add(10 * time.Minute) + } + } else { + defer w.mtx.RUnlock() + } + return w.assertion, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt new file mode 100644 index 000000000..48ea6616b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go new file mode 100644 index 000000000..245af7d2b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag + +import ( + "fmt" + "runtime" + "strings" +) + +// Caller returns the file and line number of a frame on the caller's stack. +// If the funtion fails an empty string is returned. +// skipFrames - the number of frames to skip when determining the caller. +// Passing a value of 0 will return the immediate caller of this function. +func Caller(skipFrames int) string { + if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { + // the skipFrames + 1 is to skip ourselves + frame := runtime.FuncForPC(pc) + return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line) + } + return "" +} + +// StackTrace returns a formatted stack trace string. +// If the funtion fails an empty string is returned. +// skipFrames - the number of stack frames to skip before composing the trace string. +// totalFrames - the maximum number of stack frames to include in the trace string. +func StackTrace(skipFrames, totalFrames int) string { + pcCallers := make([]uintptr, totalFrames) + if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 { + return "" + } + frames := runtime.CallersFrames(pcCallers) + sb := strings.Builder{} + for { + frame, more := frames.Next() + sb.WriteString(frame.Function) + sb.WriteString("()\n\t") + sb.WriteString(frame.File) + sb.WriteRune(':') + sb.WriteString(fmt.Sprintf("%d\n", frame.Line)) + if !more { + break + } + } + return sb.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go new file mode 100644 index 000000000..66bf13e5f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go new file mode 100644 index 000000000..8c6eacb61 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go new file mode 100644 index 000000000..ade7b348e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package errorinfo + +// NonRetriable represents a non-transient error. This works in +// conjunction with the retry policy, indicating that the error condition +// is idempotent, so no retries will be attempted. +// Use errors.As() to access this interface in the error chain. +type NonRetriable interface { + error + NonRetriable() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go new file mode 100644 index 000000000..d4ed6ccc8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go @@ -0,0 +1,124 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "io" + "net/http" +) + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// PayloadOptions contains the optional values for the Payload func. +// NOT exported but used by azcore. +type PayloadOptions struct { + // BytesModifier receives the downloaded byte slice and returns an updated byte slice. + // Use this to modify the downloaded bytes in a payload (e.g. removing a BOM). + BytesModifier func([]byte) []byte +} + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +// Exported as runtime.Payload() WITHOUT the opts parameter. +func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { + modifyBytes := func(b []byte) []byte { return b } + if opts != nil && opts.BytesModifier != nil { + modifyBytes = opts.BytesModifier + } + + // r.Body won't be a nopClosingBytesReader if downloading was skipped + if buf, ok := resp.Body.(*nopClosingBytesReader); ok { + bytesBody := modifyBytes(buf.Bytes()) + buf.Set(bytesBody) + return bytesBody, nil + } + + bytesBody, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + + bytesBody = modifyBytes(bytesBody) + resp.Body = &nopClosingBytesReader{s: bytesBody} + return bytesBody, nil +} + +// PayloadDownloaded returns true if the response body has already been downloaded. +// This implies that the Payload() func above has been previously called. +// NOT exported but used by azcore. +func PayloadDownloaded(resp *http.Response) bool { + _, ok := resp.Body.(*nopClosingBytesReader) + return ok +} + +// nopClosingBytesReader is an io.ReadSeekCloser around a byte slice. +// It also provides direct access to the byte slice to avoid rereading. +type nopClosingBytesReader struct { + s []byte + i int64 +} + +// Bytes returns the underlying byte slice. +func (r *nopClosingBytesReader) Bytes() []byte { + return r.s +} + +// Close implements the io.Closer interface. +func (*nopClosingBytesReader) Close() error { + return nil +} + +// Read implements the io.Reader interface. +func (r *nopClosingBytesReader) Read(b []byte) (n int, err error) { + if r.i >= int64(len(r.s)) { + return 0, io.EOF + } + n = copy(b, r.s[r.i:]) + r.i += int64(n) + return +} + +// Set replaces the existing byte slice with the specified byte slice and resets the reader. 
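// Editor's note: illustrative sketch only; not part of the vendored files. Per the comments above,
// azcore re-exports this functionality as runtime.HasStatusCode and runtime.Payload, which is how
// callers outside this internal module would use it. Assumed imports: fmt, net/http, and
// github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime.
func exampleReadBody(resp *http.Response) ([]byte, error) {
	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
		return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}
	// Payload caches the body in a replayable reader, so reading it here doesn't consume it
	// for later response processing.
	return runtime.Payload(resp)
}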
+func (r *nopClosingBytesReader) Set(b []byte) { + r.s = b + r.i = 0 +} + +// Seek implements the io.Seeker interface. +func (r *nopClosingBytesReader) Seek(offset int64, whence int) (int64, error) { + var i int64 + switch whence { + case io.SeekStart: + i = offset + case io.SeekCurrent: + i = r.i + offset + case io.SeekEnd: + i = int64(len(r.s)) + offset + default: + return 0, errors.New("nopClosingBytesReader: invalid whence") + } + if i < 0 { + return 0, errors.New("nopClosingBytesReader: negative position") + } + r.i = i + return i, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go new file mode 100644 index 000000000..d7876d297 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go new file mode 100644 index 000000000..4f1dcf1b7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log + +import ( + "fmt" + "os" + "time" +) + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Event is used to group entries. Each group can be toggled on or off. +type Event string + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +func SetEvents(cls ...Event) { + log.cls = cls +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.lst = lst +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// END PUBLIC SURFACE AREA +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. +func Should(cls Event) bool { + if log.lst == nil { + return false + } + if log.cls == nil || len(log.cls) == 0 { + return true + } + for _, c := range log.cls { + if c == cls { + return true + } + } + return false +} + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. +func Write(cls Event, message string) { + if !Should(cls) { + return + } + log.lst(cls, message) +} + +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. 
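// Editor's note: illustrative sketch only; not part of the vendored files. azcore/log exposes this
// listener/event surface publicly, and the azidentity syncer earlier in this patch writes through it
// via log.Should and log.Write with EventAuthentication. Importing fmt and aliasing azcore/log as
// azlog are assumptions of this sketch.
func exampleEnableLogging() {
	// Nothing is written until a listener is registered.
	azlog.SetListener(func(ev azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", ev, msg)
	})
	// Output can optionally be limited to specific events, e.g. azlog.SetEvents(azidentity.EventAuthentication).
}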
+func Writef(cls Event, format string, a ...interface{}) { + if !Should(cls) { + return + } + log.lst(cls, fmt.Sprintf(format, a...)) +} + +// TestResetEvents is used for TESTING PURPOSES ONLY. +func TestResetEvents() { + log.cls = nil +} + +// logger controls which events to log and writing to the underlying log. +type logger struct { + cls []Event + lst func(Event, string) +} + +// the process-wide logger +var log logger + +func init() { + initLogging() +} + +// split out for testing purposes +func initLogging() { + if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" { + // cls could be enhanced to support a comma-delimited list of log events + log.lst = func(cls Event, msg string) { + // simple console logger, it writes to stderr in the following format: + // [time-stamp] Event: message + fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go new file mode 100644 index 000000000..db8269627 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go @@ -0,0 +1,155 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package poller + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// the well-known set of LRO status/provisioning state values. +const ( + StatusSucceeded = "Succeeded" + StatusCanceled = "Canceled" + StatusFailed = "Failed" + StatusInProgress = "InProgress" +) + +// these are non-conformant states that we've seen in the wild. +// we support them for back-compat. +const ( + StatusCancelled = "Cancelled" + StatusCompleted = "Completed" +) + +// IsTerminalState returns true if the LRO's state is terminal. +func IsTerminalState(s string) bool { + return Failed(s) || Succeeded(s) +} + +// Failed returns true if the LRO's state is terminal failure. +func Failed(s string) bool { + return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) || strings.EqualFold(s, StatusCancelled) +} + +// Succeeded returns true if the LRO's state is terminal success. +func Succeeded(s string) bool { + return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusCompleted) +} + +// returns true if the LRO response contains a valid HTTP status code +func StatusCodeValid(resp *http.Response) bool { + return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent) +} + +// IsValidURL verifies that the URL is valid and absolute. +func IsValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// ErrNoBody is returned if the response didn't contain a body. +var ErrNoBody = errors.New("the response did not contain a body") + +// GetJSON reads the response body into a raw JSON object. +// It returns ErrNoBody if there was no content. 
+func GetJSON(resp *http.Response) (map[string]any, error) { + body, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(body) == 0 { + return nil, ErrNoBody + } + // unmarshall the body to get the value + var jsonBody map[string]any + if err = json.Unmarshal(body, &jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// provisioningState returns the provisioning state from the response or the empty string. +func provisioningState(jsonBody map[string]any) string { + jsonProps, ok := jsonBody["properties"] + if !ok { + return "" + } + props, ok := jsonProps.(map[string]any) + if !ok { + return "" + } + rawPs, ok := props["provisioningState"] + if !ok { + return "" + } + ps, ok := rawPs.(string) + if !ok { + return "" + } + return ps +} + +// status returns the status from the response or the empty string. +func status(jsonBody map[string]any) string { + rawStatus, ok := jsonBody["status"] + if !ok { + return "" + } + status, ok := rawStatus.(string) + if !ok { + return "" + } + return status +} + +// GetStatus returns the LRO's status from the response body. +// Typically used for Azure-AsyncOperation flows. +// If there is no status in the response body the empty string is returned. +func GetStatus(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return status(jsonBody), nil +} + +// GetProvisioningState returns the LRO's state from the response body. +// If there is no state in the response body the empty string is returned. +func GetProvisioningState(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return provisioningState(jsonBody), nil +} + +// GetResourceLocation returns the LRO's resourceLocation value from the response body. +// Typically used for Operation-Location flows. +// If there is no resourceLocation in the response body the empty string is returned. +func GetResourceLocation(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + v, ok := jsonBody["resourceLocation"] + if !ok { + // it might be ok if the field doesn't exist, the caller must make that determination + return "", nil + } + vv, ok := v.(string) + if !ok { + return "", fmt.Errorf("the resourceLocation value %v was not in string format", v) + } + return vv, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go new file mode 100644 index 000000000..238ef42ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package temporal + +import ( + "sync" + "time" +) + +// AcquireResource abstracts a method for refreshing a temporal resource. +type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) + +// Resource is a temporal resource (usually a credential) that requires periodic refreshing. 
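// Editor's note: illustrative sketch only; not part of the vendored files. It shows the decision flow
// an LRO poller can build from the helpers above: validate the polling response, read the status, and
// treat a missing body as "not finished yet". Assumed imports: errors, fmt, net/http; the poller
// qualifier refers to this package.
func examplePollOnce(resp *http.Response) (done, failed bool, err error) {
	if !poller.StatusCodeValid(resp) {
		return false, false, fmt.Errorf("unexpected polling status code %d", resp.StatusCode)
	}
	state, err := poller.GetStatus(resp)
	if errors.Is(err, poller.ErrNoBody) {
		return false, false, nil // no body yet; keep polling
	}
	if err != nil {
		return false, false, err
	}
	return poller.IsTerminalState(state), poller.Failed(state), nil
}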
+type Resource[TResource, TState any] struct { + // cond is used to synchronize access to the shared resource embodied by the remaining fields + cond *sync.Cond + + // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource + acquiring bool + + // resource contains the value of the shared resource + resource TResource + + // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired + expiration time.Time + + // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource + lastAttempt time.Time + + // acquireResource is the callback function that actually acquires the resource + acquireResource AcquireResource[TResource, TState] +} + +// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { + return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} +} + +// Get returns the underlying resource. +// If the resource is fresh, no refresh is performed. +func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { + // If the resource is expiring within this time window, update it eagerly. + // This allows other threads/goroutines to keep running by using the not-yet-expired + // resource value while one thread/goroutine updates the resource. + const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration + const backoff = 30 * time.Second // Minimum wait time between eager update attempts + + now, acquire, expired := time.Now(), false, false + + // acquire exclusive lock + er.cond.L.Lock() + resource := er.resource + + for { + expired = er.expiration.IsZero() || er.expiration.Before(now) + if expired { + // The resource was never acquired or has expired + if !er.acquiring { + // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // Getting here means that this thread/goroutine will wait for the updated resource + } else if er.expiration.Add(-window).Before(now) { + // The resource is valid but is expiring within the time window + if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted + // to do so within the last 30 seconds, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // This thread/goroutine will use the existing resource value while another updates it + resource = er.resource + break + } else { + // The resource is not close to expiring, this thread/goroutine should use its current value + resource = er.resource + break + } + // If we get here, wait for the new resource value to be acquired/updated + er.cond.Wait() + } + er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked + + var err error + if acquire { + // This thread/goroutine has been selected to acquire/update the resource + var expiration time.Time + var newValue TResource + er.lastAttempt = now + newValue, expiration, err = er.acquireResource(state) + + // Atomically, update the shared resource's new value & expiration. 
+ er.cond.L.Lock() + if err == nil { + // Update resource & expiration, return the new value + resource = newValue + er.resource, er.expiration = resource, expiration + } else if !expired { + // An eager update failed. Discard the error and return the current--still valid--resource value + err = nil + } + er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource + + // Wake up any waiting threads/goroutines since there is a resource they can ALL use + er.cond.L.Unlock() + er.cond.Broadcast() + } + return resource, err // Return the resource this thread/goroutine can use +} + +// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). +func (er *Resource[TResource, TState]) Expire() { + er.cond.L.Lock() + defer er.cond.L.Unlock() + + // Reset the expiration as if we never got this resource to begin with + er.expiration = time.Time{} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go new file mode 100644 index 000000000..a3824bee8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go new file mode 100644 index 000000000..278ac9cd1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go @@ -0,0 +1,76 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedRFC4122 byte = 0x40 +) + +// A UUID representation compliant with specification in RFC4122 document. +type UUID [16]byte + +// New returns a new UUID using the RFC4122 algorithm. +func New() (UUID, error) { + u := UUID{} + // Set all bits to pseudo-random values. + // NOTE: this takes a process-wide lock + _, err := rand.Read(u[:]) + if err != nil { + return u, err + } + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return u, nil +} + +// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (u UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. 
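// Editor's note: illustrative sketch only; not part of the vendored files. It shows the intended use
// of the temporal.Resource type defined above: cache a value (typically a token) and let Get refresh
// it shortly before expiry. The fetchToken function, authState type, and one-hour lifetime are
// hypothetical; assumed imports are time and this temporal package (qualified as temporal).
type authState struct{ scope string }

// fetchToken stands in for a real token request; it reports when the new value expires.
func fetchToken(s authState) (string, time.Time, error) {
	return "token-for-" + s.scope, time.Now().Add(time.Hour), nil
}

// cachedToken refreshes itself via fetchToken whenever Get finds it missing or near expiry.
var cachedToken = temporal.NewResource(fetchToken)

func exampleGetToken() (string, error) {
	return cachedToken.Get(authState{scope: "https://vault.azure.net/.default"})
}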
+func Parse(s string) (UUID, error) { + var uuid UUID + // ensure format + switch len(s) { + case 36: + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 38: + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + s = s[1:37] + default: + return uuid, errors.New("invalid UUID format") + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + // parse chunks + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + b, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return uuid, fmt.Errorf("invalid UUID format: %s", err) + } + uuid[i] = byte(b) + } + return uuid, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go index aafdf021f..211c98d1e 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -6,33 +6,33 @@ generated Go code. The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, and Responding. A typical pattern is: - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) Each phase relies on decorators to modify and / or manage processing. Decorators may first modify and then pass the data along, pass the data first and then modify the result, or wrap themselves around passing the data (such as a logger might do). Decorators run in the order provided. For example, the following: - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) will set the URL to: - https://microsoft.com/a/b/c + https://microsoft.com/a/b/c Preparers and Responders may be shared and re-used (assuming the underlying decorators support sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go index 1328f1764..868345db6 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -214,7 +214,7 @@ func (r Resource) String() string { // See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid. 
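// Editor's note: illustrative sketch only; not part of the patch. The regex change in the hunk that
// follows anchors the pattern and requires a leading slash, so only a complete ARM resource ID
// parses; an ID embedded in a longer string no longer matches. Assumed imports: fmt and this
// autorest/azure package.
func exampleParseResourceID() {
	id := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1"
	r, err := ParseResourceID(id)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(r.String()) // prints the resource ID rebuilt from the parsed components
}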
func ParseResourceID(resourceID string) (Resource, error) { - const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + const resourceIDPatternText = `(?i)^/subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)$` resourceIDPattern := regexp.MustCompile(resourceIDPatternText) match := resourceIDPattern.FindStringSubmatch(resourceID) diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go index 3467b8fa6..d35b3850a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -60,9 +60,9 @@ func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { // is especially useful if there is a chance the data will fail to decode. // encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v // is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (b bytes.Buffer, err error) { + err = NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) + return } // TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE new file mode 100644 index 000000000..3d8b93bc7 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go new file mode 100644 index 000000000..19210883b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package cache allows third parties to implement external storage for caching token data +for distributed systems or multiple local applications access. + +The data stored and extracted will represent the entire cache. Therefore it is recommended +one msal instance per user. 
This data is considered opaque and there are no guarantees to +implementers on the format being passed. +*/ +package cache + +import "context" + +// Marshaler marshals data from an internal cache to bytes that can be stored. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Unmarshaler unmarshals data from a storage medium into the internal cache, overwriting it. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Serializer can serialize the cache to binary or from binary into the cache. +type Serializer interface { + Marshaler + Unmarshaler +} + +// ExportHints are suggestions for storing data. +type ExportHints struct { + // PartitionKey is a suggested key for partitioning the cache + PartitionKey string +} + +// ReplaceHints are suggestions for loading data. +type ReplaceHints struct { + // PartitionKey is a suggested key for partitioning the cache + PartitionKey string +} + +// ExportReplace exports and replaces in-memory cache data. It doesn't support nil Context or +// define the outcome of passing one. A Context without a timeout must receive a default timeout +// specified by the implementor. Retries must be implemented inside the implementation. +type ExportReplace interface { + // Replace replaces the cache with what is in external storage. Implementors should honor + // Context cancellations and return context.Canceled or context.DeadlineExceeded in those cases. + Replace(ctx context.Context, cache Unmarshaler, hints ReplaceHints) error + // Export writes the binary representation of the cache (cache.Marshal()) to external storage. + // This is considered opaque. Context cancellations should be honored as in Replace. + Export(ctx context.Context, cache Marshaler, hints ExportHints) error +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go new file mode 100644 index 000000000..6612feb4b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -0,0 +1,685 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package confidential provides a client for authentication of "confidential" applications. +A "confidential" application is defined as an app that run on servers. They are considered +difficult to access and for that reason capable of keeping an application secret. +Confidential clients can hold configuration-time secrets. 
+*/ +package confidential + +import ( + "context" + "crypto" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +/* +Design note: + +confidential.Client uses base.Client as an embedded type. base.Client statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams is a copy that is free to be manipulated here. + +Duplicate Calls shared between public.Client and this package: +There is some duplicate call options provided here that are the same as in public.Client . This +is a design choices. Go proverb(https://www.youtube.com/watch?v=PAAkCSZUG1c&t=9m28s): +"a little copying is better than a little dependency". Yes, we could have another package with +shared options (fail). That divides like 2 options from all others which makes the user look +through more docs. We can have all clients in one package, but I think separate packages +here makes for better naming (public.Client vs client.PublicClient). So I chose a little +duplication. + +.Net People, Take note on X509: +This uses x509.Certificates and private keys. x509 does not store private keys. .Net +has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net +added, it doesn't exist in real life. As such I've put a PEM decoder into here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// base usage details should be include in the package documentation. + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type Account = shared.Account + +// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file +// must contain the public certificate and the private key. If a PEM block is encrypted and +// password is not an empty string, it attempts to decrypt the PEM blocks using the password. +// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf. +func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) { + var certs []*x509.Certificate + var priv crypto.PrivateKey + for { + block, rest := pem.Decode(pemData) + if block == nil { + break + } + + //nolint:staticcheck // x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock are deprecated. They are used here only to support a usecase. 
+ if x509.IsEncryptedPEMBlock(block) { + b, err := x509.DecryptPEMBlock(block, []byte(password)) + if err != nil { + return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err) + } + block, _ = pem.Decode(b) + if block == nil { + return nil, nil, fmt.Errorf("encounter encrypted PEM block that did not decode") + } + } + + switch block.Type { + case "CERTIFICATE": + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err) + } + certs = append(certs, cert) + case "PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + + var err error + priv, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + case "RSA PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + var err error + priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + } + pemData = rest + } + + if len(certs) == 0 { + return nil, nil, fmt.Errorf("no certificates found") + } + + if priv == nil { + return nil, nil, fmt.Errorf("no private key found") + } + + return certs, priv, nil +} + +// AssertionRequestOptions has required information for client assertion claims +type AssertionRequestOptions = exported.AssertionRequestOptions + +// Credential represents the credential used in confidential client flows. +type Credential struct { + secret string + + cert *x509.Certificate + key crypto.PrivateKey + x5c []string + + assertionCallback func(context.Context, AssertionRequestOptions) (string, error) + + tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error) +} + +// toInternal returns the accesstokens.Credential that is used internally. The current structure of the +// code requires that client.go, requests.go and confidential.go share a credential type without +// having import recursion. That requires the type used between is in a shared package. Therefore +// we have this. +func (c Credential) toInternal() (*accesstokens.Credential, error) { + if c.secret != "" { + return &accesstokens.Credential{Secret: c.secret}, nil + } + if c.cert != nil { + if c.key == nil { + return nil, errors.New("missing private key for certificate") + } + return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil + } + if c.key != nil { + return nil, errors.New("missing certificate for private key") + } + if c.assertionCallback != nil { + return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil + } + if c.tokenProvider != nil { + return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil + } + return nil, errors.New("invalid credential") +} + +// NewCredFromSecret creates a Credential from a secret. +func NewCredFromSecret(secret string) (Credential, error) { + if secret == "" { + return Credential{}, errors.New("secret can't be empty string") + } + return Credential{secret: secret}, nil +} + +// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions +// authenticating the application. The callback must be thread safe. 
+func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential { + return Credential{assertionCallback: callback} +} + +// NewCredFromCert creates a Credential from a certificate or chain of certificates and an RSA private key +// as returned by [CertFromPEM]. +func NewCredFromCert(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) { + cred := Credential{key: key} + k, ok := key.(*rsa.PrivateKey) + if !ok { + return cred, errors.New("key must be an RSA key") + } + for _, cert := range certs { + if cert == nil { + // not returning an error here because certs may still contain a sufficient cert/key pair + continue + } + certKey, ok := cert.PublicKey.(*rsa.PublicKey) + if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 { + // We know this is the signing cert because its public key matches the given private key. + // This cert must be first in x5c. + cred.cert = cert + cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...) + } else { + cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + if cred.cert == nil { + return cred, errors.New("key doesn't match any certificate") + } + return cred, nil +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters = exported.TokenProviderParameters + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult = exported.TokenProviderResult + +// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function +// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. It isn't +// useful to applications in general because the token provider must implement all authentication logic. +func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential { + return Credential{tokenProvider: provider} +} + +// AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service. +func AutoDetectRegion() string { + return "TryAutoDetect" +} + +// Client is a representation of authentication client for confidential applications as defined in the +// package doc. A new Client should be created PER SERVICE USER. +// For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications +type Client struct { + base base.Client + cred *accesstokens.Credential +} + +// clientOptions are optional settings for New(). These options are set using various functions +// returning Option calls. +type clientOptions struct { + accessor cache.ExportReplace + authority, azureRegion string + capabilities []string + disableInstanceDiscovery, sendX5C bool + httpClient ops.HTTPClient +} + +// Option is an optional argument to New(). +type Option func(o *clientOptions) + +// WithCache provides an accessor that will read and write authentication data to an externally managed cache. 
+func WithCache(accessor cache.ExportReplace) Option { + return func(o *clientOptions) { + o.accessor = accessor + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *clientOptions) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *clientOptions) { + o.httpClient = httpClient + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. +func WithX5C() Option { + return func(o *clientOptions) { + o.sendX5C = true + } +} + +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *clientOptions) { + o.disableInstanceDiscovery = !enabled + } +} + +// WithAzureRegion sets the region(preferred) or Confidential.AutoDetectRegion() for auto detecting region. +// Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/. +// See https://aka.ms/region-map for more details on region names. +// The region value should be short region name for the region where the service is deployed. +// For example "centralus" is short name for region Central US. +// Not all auth flows can use the regional token service. +// Service To Service (client credential flow) tokens can be obtained from the regional service. +// Requires configuration at the tenant level. +// Auto-detection works on a limited number of Azure artifacts (VMs, Azure functions). +// If auto-detection fails, the non-regional endpoint will be used. +// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail. +func WithAzureRegion(val string) Option { + return func(o *clientOptions) { + o.azureRegion = val + } +} + +// New is the constructor for Client. authority is the URL of a token authority such as "https://login.microsoftonline.com/". +// If the Client will connect directly to AD FS, use "adfs" for the tenant. clientID is the application's client ID (also called its +// "application ID"). +func New(authority, clientID string, cred Credential, options ...Option) (Client, error) { + internalCred, err := cred.toInternal() + if err != nil { + return Client{}, err + } + + opts := clientOptions{ + authority: authority, + // if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache + disableInstanceDiscovery: cred.tokenProvider != nil, + httpClient: shared.DefaultClient, + } + for _, o := range options { + o(&opts) + } + baseOpts := []base.Option{ + base.WithCacheAccessor(opts.accessor), + base.WithClientCapabilities(opts.capabilities), + base.WithInstanceDiscovery(!opts.disableInstanceDiscovery), + base.WithRegionDetection(opts.azureRegion), + base.WithX5C(opts.sendX5C), + } + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), baseOpts...) 
+ if err != nil { + return Client{}, err + } + base.AuthParams.IsConfidentialClient = true + + return Client{base: base, cred: internalCred}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. Users need to call CreateAuthorizationCodeURLParameters and pass it in. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithLoginHint pre-populates the login prompt with a username. +func WithLoginHint(username string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. +func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByCredentialOptions: + t.claims = claims + case *acquireTokenOnBehalfOfOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New]. +// This option is valid for any token acquisition method. 
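A usage sketch of New and AuthCodeURL as defined above. The authority, client ID, redirect URI, and scope are placeholders; the assertion-callback credential is used only because it is the simplest of the NewCredFrom* constructors to build inline.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

func main() {
	// An assertion-callback credential is the simplest to construct inline;
	// certificate or secret credentials are more typical in real services.
	cred := confidential.NewCredFromAssertionCallback(
		func(ctx context.Context, o confidential.AssertionRequestOptions) (string, error) {
			return "signed-client-assertion", nil // placeholder assertion
		},
	)

	client, err := confidential.New(
		"https://login.microsoftonline.com/my-tenant", // placeholder authority
		"my-client-id",                                // placeholder client ID
		cred,
	)
	if err != nil {
		log.Fatal(err)
	}

	// Build an authorization-code URL, pre-filling the login prompt.
	u, err := client.AuthCodeURL(
		context.TODO(),
		"my-client-id",
		"https://localhost/redirect", // placeholder redirect URI
		[]string{"User.Read"},        // placeholder scope
		confidential.WithLoginHint("user@contoso.com"),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u)
}
```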
+func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByCredentialOptions: + t.tenantID = tenantID + case *acquireTokenOnBehalfOfOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. +// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := acquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + if o.claims != "" { + return AuthResult{}, errors.New("call another AcquireToken method to request a new token having these claims") + } + + silentParameters := base.AcquireTokenSilentParameters{ + Scopes: scopes, + Account: o.account, + RequestType: accesstokens.ATConfidential, + Credential: cca.cred, + IsAppCache: o.account.IsZero(), + TenantID: o.tenantID, + } + + return cca.base.AcquireTokenSilent(ctx, silentParameters) +} + +// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type acquireTokenByAuthCodeOptions struct { + challenge, claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() +} + +// WithChallenge allows you to provide a challenge for the .AcquireTokenByAuthCode() call. 
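WithClaims and WithTenantID can be passed to several different Acquire* methods because each returns an anonymous struct that embeds every relevant marker interface plus the options.CallOption carrying the actual mutation. A stripped-down, self-contained sketch of that pattern follows; every name in it is invented for illustration, and options.ApplyOptions is approximated by a direct call.

```go
package main

import "fmt"

// Per-method marker interfaces, analogous to AcquireSilentOption, AuthCodeURLOption, etc.
type silentOption interface{ silentOpt() }
type urlOption interface{ urlOpt() }

// applier plays the role of options.CallOption: it mutates a concrete options struct.
type applier interface{ apply(target any) error }

type applyFunc func(any) error

func (f applyFunc) apply(target any) error { return f(target) }

// Concrete per-method option structs.
type silentOpts struct{ tenantID string }
type urlOpts struct{ tenantID string }

// withTenant is accepted by both "methods" because the returned anonymous struct
// embeds both marker interfaces plus the applier that does the work.
func withTenant(id string) interface {
	silentOption
	urlOption
	applier
} {
	return struct {
		silentOption
		urlOption
		applier
	}{
		applier: applyFunc(func(target any) error {
			switch t := target.(type) {
			case *silentOpts:
				t.tenantID = id
			case *urlOpts:
				t.tenantID = id
			default:
				return fmt.Errorf("unexpected options type %T", target)
			}
			return nil
		}),
	}
}

func main() {
	o := silentOpts{}
	// Roughly what options.ApplyOptions does for each supplied option.
	if err := withTenant("my-tenant").apply(&o); err != nil {
		fmt.Println(err)
	}
	fmt.Println(o.tenantID) // my-tenant
}
```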
+func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := acquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: o.challenge, + Claims: o.claims, + AppType: accesstokens.ATConfidential, + Credential: cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode + RedirectURI: redirectURI, + TenantID: o.tenantID, + } + + return cca.base.AcquireTokenByAuthCode(ctx, params) +} + +// acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential +type acquireTokenByCredentialOptions struct { + claims, tenantID string +} + +// AcquireByCredentialOption is implemented by options for AcquireTokenByCredential +type AcquireByCredentialOption interface { + acquireByCredOption() +} + +// AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant. +// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, opts ...AcquireByCredentialOption) (AuthResult, error) { + o := acquireTokenByCredentialOptions{} + err := options.ApplyOptions(&o, opts) + if err != nil { + return AuthResult{}, err + } + authParams, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATClientCredentials + authParams.Claims = o.claims + + token, err := cca.base.Token.Credential(ctx, authParams, cca.cred) + if err != nil { + return AuthResult{}, err + } + return cca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf +type acquireTokenOnBehalfOfOptions struct { + claims, tenantID string +} + +// AcquireOnBehalfOfOption is implemented by options for AcquireTokenOnBehalfOf +type AcquireOnBehalfOfOption interface { + acquireOBOOption() +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. +// Refer https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow. 
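Taken together, the silent and client-credential calls support the usual service-to-service pattern: consult the cache first, then go to the authority on a miss. A sketch, assuming client was built with New and that scopes names the target resource:

```go
package example

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

// getToken consults the cache first and only contacts the authority on a miss.
// client is assumed to have been built with confidential.New; scopes would
// typically be the target resource's ".default" scope.
func getToken(ctx context.Context, client confidential.Client, scopes []string) (confidential.AuthResult, error) {
	// AcquireTokenSilent returns a cached access token while one is still valid.
	if result, err := client.AcquireTokenSilent(ctx, scopes); err == nil {
		return result, nil
	}
	// Cache miss: redeem the client credential configured in New.
	return client.AcquireTokenByCredential(ctx, scopes)
}
```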
+// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, opts ...AcquireOnBehalfOfOption) (AuthResult, error) { + o := acquireTokenOnBehalfOfOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + params := base.AcquireTokenOnBehalfOfParameters{ + Scopes: scopes, + UserAssertion: userAssertion, + Claims: o.claims, + Credential: cca.cred, + TenantID: o.tenantID, + } + return cca.base.AcquireTokenOnBehalfOf(ctx, params) +} + +// Account gets the account in the token cache with the specified homeAccountID. +func (cca Client) Account(ctx context.Context, accountID string) (Account, error) { + return cca.base.Account(ctx, accountID) +} + +// RemoveAccount signs the account out and forgets account from token cache. +func (cca Client) RemoveAccount(ctx context.Context, account Account) error { + return cca.base.RemoveAccount(ctx, account) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md new file mode 100644 index 000000000..7ef7862fe --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md @@ -0,0 +1,111 @@ +# MSAL Error Design + +Author: Abhidnya Patil(abhidnya.patil@microsoft.com) + +Contributors: + +- John Doak(jdoak@microsoft.com) +- Keegan Caruso(Keegan.Caruso@microsoft.com) +- Joel Hendrix(jhendrix@microsoft.com) + +## Background + +Errors in MSAL are intended for app developers to troubleshoot and not for displaying to end-users. + +### Go error handling vs other MSAL languages + +Most modern languages use exception based errors. Simply put, you "throw" an exception and it must be caught at some routine in the upper stack or it will eventually crash the program. + +Go doesn't use exceptions, instead it relies on multiple return values, one of which can be the builtin error interface type. It is up to the user to decide what to do. + +### Go custom error types + +Errors can be created in Go by simply using errors.New() or fmt.Errorf() to create an "error". + +Custom errors can be created in multiple ways. One of the more robust ways is simply to satisfy the error interface: + +```go +type MyCustomErr struct { + Msg string +} +func (m MyCustomErr) Error() string { // This implements "error" + return m.Msg +} +``` + +### MSAL Error Goals + +- Provide diagnostics to the user and for tickets that can be used to track down bugs or client misconfigurations +- Detect errors that are transitory and can be retried +- Allow the user to identify certain errors that the program can respond to, such a informing the user for the need to do an enrollment + +## Implementing Client Side Errors + +Client side errors indicate a misconfiguration or passing of bad arguments that is non-recoverable. Retrying isn't possible. + +These errors can simply be standard Go errors created by errors.New() or fmt.Errorf(). If down the line we need a custom error, we can introduce it, but for now the error messages just need to be clear on what the issue was. + +## Implementing Service Side Errors + +Service side errors occur when an external RPC responds either with an HTTP error code or returns a message that includes an error. + +These errors can be transitory (please slow down) or permanent (HTTP 404). 
To provide our diagnostic goals, we require the ability to differentiate these errors from other errors. + +The current implementation includes a specialized type that captures any error from the server: + +```go +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + Resp *http.Response + Err error +} + +// Errors implements error.Error(). +func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. +func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS stuff we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} +``` + +A user will always receive the most concise error we provide. They can tell if it is a server side error using Go error package: + +```go +var callErr CallErr +if errors.As(err, &callErr) { + ... +} +``` + +We provide a Verbose() function that can retrieve the most verbose message from any error we provide: + +```go +fmt.Println(errors.Verbose(err)) +``` + +If further differentiation is required, we can add custom errors that use Go error wrapping on top of CallErr to achieve our diagnostic goals (such as detecting when to retry a call due to transient errors). + +CallErr is always thrown from the comm package (which handles all http requests) and looks similar to: + +```go +return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, ErrorResponse), //ErrorResponse is the json body extracted from the http response + } +``` + +## Future Decisions + +The ability to retry calls needs to have centralized responsibility. Either the user is doing it or the client is doing it. + +If the user should be responsible, our errors package will include a CanRetry() function that will inform the user if the error provided to them is retryable. This is based on the http error code and possibly the type of error that was returned. It would also include a sleep time if the server returned an amount of time to wait. + +Otherwise we will do this internally and retries will be left to us. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go new file mode 100644 index 000000000..c9b8dbed0 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package errors + +import ( + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strings" + + "github.com/kylelemons/godebug/pretty" +) + +var prettyConf = &pretty.Config{ + IncludeUnexported: false, + SkipZeroFields: true, + TrackCycles: true, + Formatter: map[reflect.Type]interface{}{ + reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string { + b, err := io.ReadAll(r) + if err != nil { + return "could not read io.Reader content" + } + return string(b) + }, + }, +} + +type verboser interface { + Verbose() string +} + +// Verbose prints the most verbose error that the error message has. 
+func Verbose(err error) string { + build := strings.Builder{} + for { + if err == nil { + break + } + if v, ok := err.(verboser); ok { + build.WriteString(v.Verbose()) + } else { + build.WriteString(err.Error()) + } + err = errors.Unwrap(err) + } + return build.String() +} + +// New is equivalent to errors.New(). +func New(text string) error { + return errors.New(text) +} + +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + // Resp contains response body + Resp *http.Response + Err error +} + +// Errors implements error.Error(). +func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. +func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} + +// Is reports whether any error in errors chain matches target. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As finds the first error in errors chain that matches target, +// and if so, sets target to that error value and returns true. +// Otherwise, it returns false. +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go new file mode 100644 index 000000000..5f68384f6 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -0,0 +1,467 @@ +// Package base contains a "Base" client that is used by the external public.Client and confidential.Client. +// Base holds shared attributes that must be available to both clients and methods that act as +// shared calls. +package base + +import ( + "context" + "errors" + "fmt" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +const ( + // AuthorityPublicCloud is the default AAD authority host + AuthorityPublicCloud = "https://login.microsoftonline.com/common" + scopeSeparator = " " +) + +// manager provides an internal cache. It is defined to allow faking the cache in tests. +// In production it's a *storage.Manager or *storage.PartitionedManager. +type manager interface { + cache.Serializer + Read(context.Context, authority.AuthParams) (storage.TokenResponse, error) + Write(authority.AuthParams, accesstokens.TokenResponse) (shared.Account, error) +} + +// accountManager is a manager that also caches accounts. In production it's a *storage.Manager. 
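A short sketch of how a caller might use this package to separate service-side failures from client-side ones and decide whether a retry is sensible. The status-code policy below is illustrative only, not something the package prescribes.

```go
package example

import (
	"log"
	"net/http"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
)

// shouldRetry reports whether an MSAL error came from the service side and
// looks transient.
func shouldRetry(err error) bool {
	var callErr errors.CallErr
	if !errors.As(err, &callErr) || callErr.Resp == nil {
		// Client-side errors (bad arguments, misconfiguration) won't be fixed by retrying.
		return false
	}
	// Log the full request/response detail for diagnostics.
	log.Println(errors.Verbose(err))
	switch callErr.Resp.StatusCode {
	case http.StatusTooManyRequests,
		http.StatusBadGateway,
		http.StatusServiceUnavailable,
		http.StatusGatewayTimeout:
		return true // transient; the caller may retry after a delay
	}
	return false
}
```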
+type accountManager interface { + manager + AllAccounts() []shared.Account + Account(homeAccountID string) shared.Account + RemoveAccount(account shared.Account, clientID string) +} + +// AcquireTokenSilentParameters contains the parameters to acquire a token silently (from cache). +type AcquireTokenSilentParameters struct { + Scopes []string + Account shared.Account + RequestType accesstokens.AppType + Credential *accesstokens.Credential + IsAppCache bool + TenantID string + UserAssertion string + AuthorizationType authority.AuthorizeType + Claims string +} + +// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow. +// To use PKCE, set the CodeChallengeParameter. +// Code challenges are used to secure authorization code grants; for more information, visit +// https://tools.ietf.org/html/rfc7636. +type AcquireTokenAuthCodeParameters struct { + Scopes []string + Code string + Challenge string + Claims string + RedirectURI string + AppType accesstokens.AppType + Credential *accesstokens.Credential + TenantID string +} + +type AcquireTokenOnBehalfOfParameters struct { + Scopes []string + Claims string + Credential *accesstokens.Credential + TenantID string + UserAssertion string +} + +// AuthResult contains the results of one token acquisition operation in PublicClientApplication +// or ConfidentialClientApplication. For details see https://aka.ms/msal-net-authenticationresult +type AuthResult struct { + Account shared.Account + IDToken accesstokens.IDToken + AccessToken string + ExpiresOn time.Time + GrantedScopes []string + DeclinedScopes []string +} + +// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache). +func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) { + if err := storageTokenResponse.AccessToken.Validate(); err != nil { + return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err) + } + + account := storageTokenResponse.Account + accessToken := storageTokenResponse.AccessToken.Secret + grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator) + + // Checking if there was an ID token in the cache; this will throw an error in the case of confidential client applications. + var idToken accesstokens.IDToken + if !storageTokenResponse.IDToken.IsZero() { + err := idToken.UnmarshalJSON([]byte(storageTokenResponse.IDToken.Secret)) + if err != nil { + return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err) + } + } + return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil +} + +// NewAuthResult creates an AuthResult. +func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Account) (AuthResult, error) { + if len(tokenResponse.DeclinedScopes) > 0 { + return AuthResult{}, fmt.Errorf("token response failed because declined scopes are present: %s", strings.Join(tokenResponse.DeclinedScopes, ",")) + } + return AuthResult{ + Account: account, + IDToken: tokenResponse.IDToken, + AccessToken: tokenResponse.AccessToken, + ExpiresOn: tokenResponse.ExpiresOn.T, + GrantedScopes: tokenResponse.GrantedScopes.Slice, + }, nil +} + +// Client is a base client that provides access to common methods and primatives that +// can be used by multiple clients. 
+type Client struct { + Token *oauth.Client + manager accountManager // *storage.Manager or fakeManager in tests + // pmanager is a partitioned cache for OBO authentication. *storage.PartitionedManager or fakeManager in tests + pmanager manager + + AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New(). + cacheAccessor cache.ExportReplace + cacheAccessorMu *sync.RWMutex +} + +// Option is an optional argument to the New constructor. +type Option func(c *Client) error + +// WithCacheAccessor allows you to set some type of cache for storing authentication tokens. +func WithCacheAccessor(ca cache.ExportReplace) Option { + return func(c *Client) error { + if ca != nil { + c.cacheAccessor = ca + } + return nil + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(c *Client) error { + var err error + if len(capabilities) > 0 { + cc, err := authority.NewClientCapabilities(capabilities) + if err == nil { + c.AuthParams.Capabilities = cc + } + } + return err + } +} + +// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user +func WithKnownAuthorityHosts(hosts []string) Option { + return func(c *Client) error { + cp := make([]string, len(hosts)) + copy(cp, hosts) + c.AuthParams.KnownAuthorityHosts = cp + return nil + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. +func WithX5C(sendX5C bool) Option { + return func(c *Client) error { + c.AuthParams.SendX5C = sendX5C + return nil + } +} + +func WithRegionDetection(region string) Option { + return func(c *Client) error { + c.AuthParams.AuthorityInfo.Region = region + return nil + } +} + +func WithInstanceDiscovery(instanceDiscoveryEnabled bool) Option { + return func(c *Client) error { + c.AuthParams.AuthorityInfo.ValidateAuthority = instanceDiscoveryEnabled + c.AuthParams.AuthorityInfo.InstanceDiscoveryDisabled = !instanceDiscoveryEnabled + return nil + } +} + +// New is the constructor for Base. +func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) { + //By default, validateAuthority is set to true and instanceDiscoveryDisabled is set to false + authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true, false) + if err != nil { + return Client{}, err + } + authParams := authority.NewAuthParams(clientID, authInfo) + client := Client{ // Note: Hey, don't even THINK about making Base into *Base. See "design notes" in public.go and confidential.go + Token: token, + AuthParams: authParams, + cacheAccessorMu: &sync.RWMutex{}, + manager: storage.New(token), + pmanager: storage.NewPartitionedManager(token), + } + for _, o := range options { + if err = o(&client); err != nil { + break + } + } + return client, err + +} + +// AuthCodeURL creates a URL used to acquire an authorization code. 
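WithCacheAccessor (fed by the confidential client's WithCache option shown earlier) is the persistence hook: the client calls Replace to hydrate the in-memory cache before a read and Export to flush it after a write. A minimal file-backed sketch, assuming the cache package's Marshaler/Unmarshaler interfaces that underlie cache.Serializer; the path is illustrative and the sketch ignores locking across processes.

```go
package msalcache

import (
	"context"
	"os"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
)

// fileCache persists the serialized cache contract to a single JSON file.
type fileCache struct{ path string }

// Replace hydrates the in-memory cache from disk before the client reads it.
func (f fileCache) Replace(ctx context.Context, c cache.Unmarshaler, hints cache.ReplaceHints) error {
	data, err := os.ReadFile(f.path)
	if os.IsNotExist(err) {
		return nil // nothing persisted yet
	}
	if err != nil {
		return err
	}
	return c.Unmarshal(data)
}

// Export flushes the in-memory cache to disk after the client writes to it.
func (f fileCache) Export(ctx context.Context, c cache.Marshaler, hints cache.ExportHints) error {
	data, err := c.Marshal()
	if err != nil {
		return err
	}
	return os.WriteFile(f.path, data, 0o600)
}
```

A value of this type could then be handed to a client at the application level through the confidential client's WithCache option shown earlier.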
+func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, authParams authority.AuthParams) (string, error) { + endpoints, err := b.Token.ResolveEndpoints(ctx, authParams.AuthorityInfo, "") + if err != nil { + return "", err + } + + baseURL, err := url.Parse(endpoints.AuthorizationEndpoint) + if err != nil { + return "", err + } + + claims, err := authParams.MergeCapabilitiesAndClaims() + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("client_id", clientID) + v.Add("response_type", "code") + v.Add("redirect_uri", redirectURI) + v.Add("scope", strings.Join(scopes, scopeSeparator)) + if authParams.State != "" { + v.Add("state", authParams.State) + } + if claims != "" { + v.Add("claims", claims) + } + if authParams.CodeChallenge != "" { + v.Add("code_challenge", authParams.CodeChallenge) + } + if authParams.CodeChallengeMethod != "" { + v.Add("code_challenge_method", authParams.CodeChallengeMethod) + } + if authParams.LoginHint != "" { + v.Add("login_hint", authParams.LoginHint) + } + if authParams.Prompt != "" { + v.Add("prompt", authParams.Prompt) + } + if authParams.DomainHint != "" { + v.Add("domain_hint", authParams.DomainHint) + } + // There were left over from an implementation that didn't use any of these. We may + // need to add them later, but as of now aren't needed. + /* + if p.ResponseMode != "" { + urlParams.Add("response_mode", p.ResponseMode) + } + */ + baseURL.RawQuery = v.Encode() + return baseURL.String(), nil +} + +func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) { + ar := AuthResult{} + // when tenant == "", the caller didn't specify a tenant and WithTenant will choose the client's configured tenant + tenant := silent.TenantID + authParams, err := b.AuthParams.WithTenant(tenant) + if err != nil { + return ar, err + } + authParams.Scopes = silent.Scopes + authParams.HomeAccountID = silent.Account.HomeAccountID + authParams.AuthorizationType = silent.AuthorizationType + authParams.Claims = silent.Claims + authParams.UserAssertion = silent.UserAssertion + + m := b.pmanager + if authParams.AuthorizationType != authority.ATOnBehalfOf { + authParams.AuthorizationType = authority.ATRefreshToken + m = b.manager + } + if b.cacheAccessor != nil { + key := authParams.CacheKey(silent.IsAppCache) + b.cacheAccessorMu.RLock() + err = b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + b.cacheAccessorMu.RUnlock() + } + if err != nil { + return ar, err + } + storageTokenResponse, err := m.Read(ctx, authParams) + if err != nil { + return ar, err + } + + // ignore cached access tokens when given claims + if silent.Claims == "" { + ar, err = AuthResultFromStorage(storageTokenResponse) + if err == nil { + return ar, err + } + } + + // redeem a cached refresh token, if available + if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { + return ar, errors.New("no token found") + } + var cc *accesstokens.Credential + if silent.RequestType == accesstokens.ATConfidential { + cc = silent.Credential + } + token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) + if err != nil { + return ar, err + } + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) { + authParams, err := b.AuthParams.WithTenant(authCodeParams.TenantID) + if err != nil { + return AuthResult{}, 
err + } + authParams.Claims = authCodeParams.Claims + authParams.Scopes = authCodeParams.Scopes + authParams.Redirecturi = authCodeParams.RedirectURI + authParams.AuthorizationType = authority.ATAuthCode + + var cc *accesstokens.Credential + if authCodeParams.AppType == accesstokens.ATConfidential { + cc = authCodeParams.Credential + authParams.IsConfidentialClient = true + } + + req, err := accesstokens.NewCodeChallengeRequest(authParams, authCodeParams.AppType, cc, authCodeParams.Code, authCodeParams.Challenge) + if err != nil { + return AuthResult{}, err + } + + token, err := b.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. +func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) { + var ar AuthResult + silentParameters := AcquireTokenSilentParameters{ + Scopes: onBehalfOfParams.Scopes, + RequestType: accesstokens.ATConfidential, + Credential: onBehalfOfParams.Credential, + UserAssertion: onBehalfOfParams.UserAssertion, + AuthorizationType: authority.ATOnBehalfOf, + TenantID: onBehalfOfParams.TenantID, + Claims: onBehalfOfParams.Claims, + } + ar, err := b.AcquireTokenSilent(ctx, silentParameters) + if err == nil { + return ar, err + } + authParams, err := b.AuthParams.WithTenant(onBehalfOfParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.AuthorizationType = authority.ATOnBehalfOf + authParams.Claims = onBehalfOfParams.Claims + authParams.Scopes = onBehalfOfParams.Scopes + authParams.UserAssertion = onBehalfOfParams.UserAssertion + token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential) + if err == nil { + ar, err = b.AuthResultFromToken(ctx, authParams, token, true) + } + return ar, err +} + +func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) { + if !cacheWrite { + return NewAuthResult(token, shared.Account{}) + } + var m manager = b.manager + if authParams.AuthorizationType == authority.ATOnBehalfOf { + m = b.pmanager + } + key := token.CacheKey(authParams) + if b.cacheAccessor != nil { + b.cacheAccessorMu.Lock() + defer b.cacheAccessorMu.Unlock() + err := b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return AuthResult{}, err + } + } + account, err := m.Write(authParams, token) + if err != nil { + return AuthResult{}, err + } + ar, err := NewAuthResult(token, account) + if err == nil && b.cacheAccessor != nil { + err = b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key}) + } + return ar, err +} + +func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return nil, err + } + } + return b.manager.AllAccounts(), nil +} + +func (b Client) Account(ctx context.Context, homeAccountID string) (shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer. 
+ authParams.AuthorizationType = authority.AccountByID + authParams.HomeAccountID = homeAccountID + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return shared.Account{}, err + } + } + return b.manager.Account(homeAccountID), nil +} + +// RemoveAccount removes all the ATs, RTs and IDTs from the cache associated with this account. +func (b Client) RemoveAccount(ctx context.Context, account shared.Account) error { + if b.cacheAccessor == nil { + b.manager.RemoveAccount(account, b.AuthParams.ClientID) + return nil + } + b.cacheAccessorMu.Lock() + defer b.cacheAccessorMu.Unlock() + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return err + } + b.manager.RemoveAccount(account, b.AuthParams.ClientID) + return b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key}) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go new file mode 100644 index 000000000..548c2faeb --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go @@ -0,0 +1,200 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package storage + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// Contract is the JSON structure that is written to any storage medium when serializing +// the internal cache. This design is shared between MSAL versions in many languages. +// This cannot be changed without design that includes other SDKs. +type Contract struct { + AccessTokens map[string]AccessToken `json:"AccessToken,omitempty"` + RefreshTokens map[string]accesstokens.RefreshToken `json:"RefreshToken,omitempty"` + IDTokens map[string]IDToken `json:"IdToken,omitempty"` + Accounts map[string]shared.Account `json:"Account,omitempty"` + AppMetaData map[string]AppMetaData `json:"AppMetadata,omitempty"` + + AdditionalFields map[string]interface{} +} + +// Contract is the JSON structure that is written to any storage medium when serializing +// the internal cache. This design is shared between MSAL versions in many languages. +// This cannot be changed without design that includes other SDKs. +type InMemoryContract struct { + AccessTokensPartition map[string]map[string]AccessToken + RefreshTokensPartition map[string]map[string]accesstokens.RefreshToken + IDTokensPartition map[string]map[string]IDToken + AccountsPartition map[string]map[string]shared.Account + AppMetaData map[string]AppMetaData +} + +// NewContract is the constructor for Contract. 
+func NewInMemoryContract() *InMemoryContract { + return &InMemoryContract{ + AccessTokensPartition: map[string]map[string]AccessToken{}, + RefreshTokensPartition: map[string]map[string]accesstokens.RefreshToken{}, + IDTokensPartition: map[string]map[string]IDToken{}, + AccountsPartition: map[string]map[string]shared.Account{}, + AppMetaData: map[string]AppMetaData{}, + } +} + +// NewContract is the constructor for Contract. +func NewContract() *Contract { + return &Contract{ + AccessTokens: map[string]AccessToken{}, + RefreshTokens: map[string]accesstokens.RefreshToken{}, + IDTokens: map[string]IDToken{}, + Accounts: map[string]shared.Account{}, + AppMetaData: map[string]AppMetaData{}, + AdditionalFields: map[string]interface{}{}, + } +} + +// AccessToken is the JSON representation of a MSAL access token for encoding to storage. +type AccessToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + Scopes string `json:"target,omitempty"` + ExpiresOn internalTime.Unix `json:"expires_on,omitempty"` + ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` + CachedAt internalTime.Unix `json:"cached_at,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccessToken is the constructor for AccessToken. +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken { + return AccessToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "AccessToken", + ClientID: clientID, + Secret: token, + Scopes: scopes, + CachedAt: internalTime.Unix{T: cachedAt.UTC()}, + ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, + ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AccessToken) Key() string { + return strings.Join( + []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, + shared.CacheKeySeparator, + ) +} + +// FakeValidate enables tests to fake access token validation +var FakeValidate func(AccessToken) error + +// Validate validates that this AccessToken can be used. +func (a AccessToken) Validate() error { + if FakeValidate != nil { + return FakeValidate(a) + } + if a.CachedAt.T.After(time.Now()) { + return errors.New("access token isn't valid, it was cached at a future time") + } + if a.ExpiresOn.T.Before(time.Now().Add(5 * time.Minute)) { + return fmt.Errorf("access token is expired") + } + if a.CachedAt.T.IsZero() { + return fmt.Errorf("access token does not have CachedAt set") + } + return nil +} + +// IDToken is the JSON representation of an MSAL id token for encoding to storage. +type IDToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + AdditionalFields map[string]interface{} +} + +// IsZero determines if IDToken is the zero value. 
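To make the shape of a cache entry concrete, the sketch below builds an AccessToken with the constructor above and prints its lookup key and validation result. The identifiers are placeholders, and because this package is internal the snippet is written as if it sat alongside the package's own tests; applications cannot import it.

```go
package storage

import (
	"fmt"
	"time"
)

// ExampleAccessTokenKey shows what a cache entry and its lookup key look like.
func ExampleAccessTokenKey() {
	now := time.Now()
	at := NewAccessToken(
		"uid.utid",                  // placeholder home account ID
		"login.microsoftonline.com", // environment
		"my-tenant",                 // realm (tenant)
		"my-client-id",              // client ID
		now,                         // cached at
		now.Add(time.Hour),          // expires on
		now.Add(2*time.Hour),        // extended expires on
		"openid user.read",          // space-separated scopes
		"token-secret-placeholder",  // the access token itself
	)

	// Key joins home account ID, environment, credential type, client ID,
	// realm and scopes with the shared cache-key separator.
	fmt.Println(at.Key())

	// Validate rejects entries that are expired (with a five-minute buffer)
	// or that claim to have been cached in the future.
	fmt.Println(at.Validate())
}
```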
+func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// NewIDToken is the constructor for IDToken. +func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken { + return IDToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "IDToken", + ClientID: clientID, + Secret: idToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (id IDToken) Key() string { + return strings.Join( + []string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm}, + shared.CacheKeySeparator, + ) +} + +// AppMetaData is the JSON representation of application metadata for encoding to storage. +type AppMetaData struct { + FamilyID string `json:"family_id,omitempty"` + ClientID string `json:"client_id,omitempty"` + Environment string `json:"environment,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAppMetaData is the constructor for AppMetaData. +func NewAppMetaData(familyID, clientID, environment string) AppMetaData { + return AppMetaData{ + FamilyID: familyID, + ClientID: clientID, + Environment: environment, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AppMetaData) Key() string { + return strings.Join( + []string{"AppMetaData", a.Environment, a.ClientID}, + shared.CacheKeySeparator, + ) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go new file mode 100644 index 000000000..87d7d797b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -0,0 +1,436 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// PartitionedManager is a partitioned in-memory cache of access tokens, accounts and meta data. +type PartitionedManager struct { + contract *InMemoryContract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// NewPartitionedManager is the constructor for PartitionedManager. +func NewPartitionedManager(requests *oauth.Client) *PartitionedManager { + m := &PartitionedManager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewInMemoryContract() + return m +} + +// Read reads a storage token from the cache if it exists. 
+func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + userAssertionHash := authParameters.AssertionHash() + partitionKeyFromRequest := userAssertionHash + + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. + accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) + if err == nil { + tr.AccessToken = accessToken + } + idToken, err := m.readIDToken(aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken)) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(aliases, realm, userAssertionHash, idToken.HomeAccountID) + if err == nil { + tr.Account = account + } + return tr, nil +} + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + userAssertionHash := authParameters.AssertionHash() + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + refreshToken.UserAssertionHash = userAssertionHash + } + if err := m.writeRefreshToken(refreshToken, getPartitionKeyRefreshToken(refreshToken)); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + accessToken.UserAssertionHash = userAssertionHash // get Hash method on this + } + + // Since we have a valid access token, cache it before moving on. 
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken, getPartitionKeyAccessToken(accessToken)); err != nil { + return account, err + } + } else { + return shared.Account{}, err + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + idToken.UserAssertionHash = userAssertionHash + } + if err := m.writeIDToken(idToken, getPartitionKeyIDToken(idToken)); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + account.UserAssertionHash = userAssertionHash + } + if err := m.writeAccount(account, getPartitionKeyAccount(account)); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *PartitionedManager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *PartitionedManager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok { + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range accessTokens { + if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at, nil + } + } + } + } + } + return AccessToken{}, fmt.Errorf("access token not found") +} + +func (m *PartitionedManager) writeAccessToken(accessToken AccessToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.AccessTokensPartition[partitionKey] = make(map[string]AccessToken) + } + m.contract.AccessTokensPartition[partitionKey][key] = accessToken + return nil +} + +func matchFamilyRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string, clientID string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *PartitionedManager) readRefreshToken(envAliases []string, familyID, clientID, userAssertionHash, partitionKey string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshTokenObo(rt, userAssertionHash, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshTokenObo(rt, userAssertionHash, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, matcher := range matchers { + for _, rt := range m.contract.RefreshTokensPartition[partitionKey] { + if matcher(rt) { + return rt, nil + } + } + } + + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func (m *PartitionedManager) writeRefreshToken(refreshToken accesstokens.RefreshToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := refreshToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.RefreshTokensPartition[partitionKey] = make(map[string]accesstokens.RefreshToken) + } + m.contract.RefreshTokensPartition[partitionKey][key] = refreshToken + return nil +} + +func (m *PartitionedManager) readIDToken(envAliases []string, realm, clientID, userAssertionHash, partitionKey string) (IDToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, idt := range m.contract.IDTokensPartition[partitionKey] { + if idt.Realm == realm && idt.ClientID == clientID && idt.UserAssertionHash == userAssertionHash { + if checkAlias(idt.Environment, envAliases) { + return idt, nil + } + } + } + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *PartitionedManager) writeIDToken(idToken IDToken, partitionKey string) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.IDTokensPartition[partitionKey] == nil { + m.contract.IDTokensPartition[partitionKey] = make(map[string]IDToken) + } + m.contract.IDTokensPartition[partitionKey][key] = idToken + return nil +} + +func (m *PartitionedManager) readAccount(envAliases []string, realm, UserAssertionHash, partitionKey string) (shared.Account, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
+ for _, acc := range m.contract.AccountsPartition[partitionKey] { + if checkAlias(acc.Environment, envAliases) && acc.UserAssertionHash == UserAssertionHash && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *PartitionedManager) writeAccount(account shared.Account, partitionKey string) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.AccountsPartition[partitionKey] == nil { + m.contract.AccountsPartition[partitionKey] = make(map[string]shared.Account) + } + m.contract.AccountsPartition[partitionKey][key] = account + return nil +} + +func (m *PartitionedManager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *PartitionedManager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *PartitionedManager) update(cache *InMemoryContract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. +func (m *PartitionedManager) Marshal() ([]byte, error) { + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *PartitionedManager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewInMemoryContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} + +func getPartitionKeyAccessToken(item AccessToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyRefreshToken(item accesstokens.RefreshToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyIDToken(item IDToken) string { + return item.HomeAccountID +} + +func getPartitionKeyAccount(item shared.Account) string { + return item.HomeAccountID +} + +func getPartitionKeyIDTokenRead(item AccessToken) string { + return item.HomeAccountID +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go new file mode 100644 index 000000000..add751925 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -0,0 +1,517 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package storage holds all cached token information for MSAL. This storage can be +// augmented with third-party extensions to provide persistent storage. In that case, +// reads and writes in upper packages will call Marshal() to take the entire in-memory +// representation and write it to storage and Unmarshal() to update the entire in-memory +// storage with what was in the persistent storage. 
The persistent storage can only be +// accessed in this way because multiple MSAL clients written in multiple languages can +// access the same storage and must adhere to the same method that was defined +// previously. +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// aadInstanceDiscoveryer allows faking in tests. +// It is implemented in production by ops/authority.Client +type aadInstanceDiscoveryer interface { + AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// TokenResponse mimics a token response that was pulled from the cache. +type TokenResponse struct { + RefreshToken accesstokens.RefreshToken + IDToken IDToken // *Credential + AccessToken AccessToken + Account shared.Account +} + +// Manager is an in-memory cache of access tokens, accounts and meta data. This data is +// updated on read/write calls. Unmarshal() replaces all data stored here with whatever +// was given to it on each call. +type Manager struct { + contract *Contract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// New is the constructor for Manager. +func New(requests *oauth.Client) *Manager { + m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewContract() + return m +} + +func checkAlias(alias string, aliases []string) bool { + for _, v := range aliases { + if alias == v { + return true + } + } + return false +} + +func isMatchingScopes(scopesOne []string, scopesTwo string) bool { + newScopesTwo := strings.Split(scopesTwo, scopeSeparator) + scopeCounter := 0 + for _, scope := range scopesOne { + for _, otherScope := range newScopesTwo { + if strings.EqualFold(scope, otherScope) { + scopeCounter++ + continue + } + } + } + return scopeCounter == len(scopesOne) +} + +// Read reads a storage token from the cache if it exists. +func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + homeAccountID := authParameters.HomeAccountID + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) + tr.AccessToken = accessToken + + if homeAccountID == "" { + // caller didn't specify a user, so there's no reason to search for an ID or refresh token + return tr, nil + } + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. 
lack of an ID token doesn't prevent the caller from receiving a refresh token. + idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(homeAccountID, aliases, realm) + if err == nil { + tr.Account = account + } + return tr, nil +} + +const scopeSeparator = " " + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if err := m.writeRefreshToken(refreshToken); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + + // Since we have a valid access token, cache it before moving on. 
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken); err != nil { + return account, err + } + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if err := m.writeIDToken(idToken); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if err := m.writeAccount(account); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
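+	// A cached access token is returned only when the home account ID, realm and
+	// client ID all match, the token's environment is one of the given aliases,
+	// and every requested scope is present on the token.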
+ for _, at := range m.contract.AccessTokens { + if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at + } + } + } + } + return AccessToken{} +} + +func (m *Manager) writeAccessToken(accessToken AccessToken) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + m.contract.AccessTokens[key] = accessToken + return nil +} + +func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshToken(rt, homeID, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshToken(rt, homeID, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, matcher := range matchers { + for _, rt := range m.contract.RefreshTokens { + if matcher(rt) { + return rt, nil + } + } + } + + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error { + key := refreshToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.RefreshTokens[key] = refreshToken + return nil +} + +func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, idt := range m.contract.IDTokens { + if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { + if checkAlias(idt.Environment, envAliases) { + return idt, nil + } + } + } + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *Manager) writeIDToken(idToken IDToken) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.IDTokens[key] = idToken + return nil +} + +func (m *Manager) AllAccounts() []shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + var accounts []shared.Account + for _, v := range m.contract.Accounts { + accounts = append(accounts, v) + } + + return accounts +} + +func (m *Manager) Account(homeAccountID string) shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, v := range m.contract.Accounts { + if v.HomeAccountID == homeAccountID { + return v + } + } + + return shared.Account{} +} + +func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
+ for _, acc := range m.contract.Accounts { + if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *Manager) writeAccount(account shared.Account) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.Accounts[key] = account + return nil +} + +func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account. +func (m *Manager) RemoveAccount(account shared.Account, clientID string) { + m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID) + m.removeAccessTokens(account.HomeAccountID, account.Environment) + m.removeIDTokens(account.HomeAccountID, account.Environment) + m.removeAccounts(account.HomeAccountID, account.Environment) +} + +func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, rt := range m.contract.RefreshTokens { + // Check for RTs associated with the account. + if rt.HomeAccountID == homeID && rt.Environment == env { + // Do RT's app ownership check as a precaution, in case family apps + // and 3rd-party apps share same token cache, although they should not. + if rt.ClientID == clientID || rt.FamilyID != "" { + delete(m.contract.RefreshTokens, key) + } + } + } +} + +func (m *Manager) removeAccessTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, at := range m.contract.AccessTokens { + // Remove AT's associated with the account + if at.HomeAccountID == homeID && at.Environment == env { + // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check. + // It means ATs for other apps will also be removed, it is OK because: + // non-family apps are not supposed to share token cache to begin with; + // Even if it happens, we keep other app's RT already, so SSO still works. + delete(m.contract.AccessTokens, key) + } + } +} + +func (m *Manager) removeIDTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, idt := range m.contract.IDTokens { + // Remove ID tokens associated with the account. + if idt.HomeAccountID == homeID && idt.Environment == env { + delete(m.contract.IDTokens, key) + } + } +} + +func (m *Manager) removeAccounts(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, acc := range m.contract.Accounts { + // Remove the specified account. + if acc.HomeAccountID == homeID && acc.Environment == env { + delete(m.contract.Accounts, key) + } + } +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *Manager) update(cache *Contract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. 
+func (m *Manager) Marshal() ([]byte, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *Manager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json new file mode 100644 index 000000000..1d8181924 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json @@ -0,0 +1,56 @@ +{ + "Account": { + "uid.utid-login.windows.net-contoso": { + "username": "John Doe", + "local_account_id": "object1234", + "realm": "contoso", + "environment": "login.windows.net", + "home_account_id": "uid.utid", + "authority_type": "MSSTS" + } + }, + "RefreshToken": { + "uid.utid-login.windows.net-refreshtoken-my_client_id--s2 s1 s3": { + "target": "s2 s1 s3", + "environment": "login.windows.net", + "credential_type": "RefreshToken", + "secret": "a refresh token", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "AccessToken": { + "an-entry": { + "foo": "bar" + }, + "uid.utid-login.windows.net-accesstoken-my_client_id-contoso-s2 s1 s3": { + "environment": "login.windows.net", + "credential_type": "AccessToken", + "secret": "an access token", + "realm": "contoso", + "target": "s2 s1 s3", + "client_id": "my_client_id", + "cached_at": "1000", + "home_account_id": "uid.utid", + "extended_expires_on": "4600", + "expires_on": "4600" + } + }, + "IdToken": { + "uid.utid-login.windows.net-idtoken-my_client_id-contoso-": { + "realm": "contoso", + "environment": "login.windows.net", + "credential_type": "IdToken", + "secret": "header.eyJvaWQiOiAib2JqZWN0MTIzNCIsICJwcmVmZXJyZWRfdXNlcm5hbWUiOiAiSm9obiBEb2UiLCAic3ViIjogInN1YiJ9.signature", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "unknownEntity": {"field1":"1","field2":"whats"}, + "AppMetadata": { + "AppMetadata-login.windows.net-my_client_id": { + "environment": "login.windows.net", + "client_id": "my_client_id" + } + } + } \ No newline at end of file diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go new file mode 100644 index 000000000..7b673e3fe --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package exported contains internal types that are re-exported from a public package +package exported + +// AssertionRequestOptions has information required to generate a client assertion +type AssertionRequestOptions struct { + // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims. + ClientID string + + // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim. 
+	TokenEndpoint string
+}
+
+// TokenProviderParameters is the authentication parameters passed to token providers
+type TokenProviderParameters struct {
+	// Claims contains any additional claims requested for the token
+	Claims string
+	// CorrelationID of the authentication request
+	CorrelationID string
+	// Scopes requested for the token
+	Scopes []string
+	// TenantID identifies the tenant in which to authenticate
+	TenantID string
+}
+
+// TokenProviderResult is the authentication result returned by custom token providers
+type TokenProviderResult struct {
+	// AccessToken is the requested token
+	AccessToken string
+	// ExpiresInSeconds is the lifetime of the token in seconds
+	ExpiresInSeconds int
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
new file mode 100644
index 000000000..09edb01b7
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
@@ -0,0 +1,140 @@
+# JSON Package Design
+Author: John Doak (jdoak@microsoft.com)
+
+## Why?
+
+This project needs a special type of marshal/unmarshal not directly supported
+by the encoding/json package.
+
+The need revolves around a few key wants/needs:
+- unmarshal and marshal structs representing JSON messages
+- fields in the message not in the struct must be maintained when unmarshalled
+- those same fields must be marshalled back when encoded again
+
+The initial version used map[string]interface{} to put in the keys that
+were known and then any other keys were put into a field called AdditionalFields.
+
+This has a few negatives:
+- Dual marshaling/unmarshalling is required
+- Adding a struct field requires manually adding a key by name to be encoded/decoded from the map (which is a loosely coupled construct), which can lead to bugs that aren't detected or have bad side effects
+- Tests can become quickly disconnected if those keys aren't put
+in tests as well. So you think you have support working, but you
+don't. Existing tests were found that didn't test the marshalling output.
+- There is no enforcement that if AdditionalFields is required on one struct, it should be on all containers
+that don't have custom marshal/unmarshal.
+
+This package aims to support our needs by providing custom Marshal()/Unmarshal() functions.
+
+This prevents all the negatives in the initial solution listed above. However, it does add its own negative:
+- Custom encoding/decoding via reflection is messy (as can be seen in encoding/json itself)
+
+Go proverb: Reflection is never clear
+Suggested reading: https://blog.golang.org/laws-of-reflection
+
+## Important design decisions
+
+- We don't want to understand all JSON decoding rules
+- We don't want to deal with all the quoting, commas, etc on decode
+- Need support for json.Marshaler/Unmarshaler, so we can support types like time.Time
+- If struct does not implement json.Unmarshaler, it must have AdditionalFields defined
+- We only support root level objects that are \*struct or struct
+
+To facilitate these goals, we will utilize the json.Encoder and json.Decoder.
+They provide streaming processing (efficient) and return errors on bad JSON.
+
+Support for json.Marshaler/Unmarshaler allows for us to use non-basic types
+that must be specially encoded/decoded (like time.Time objects).
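As a quick orientation, the sketch below (an editorial illustration, not part of the vendored file) shows the intended round trip. The `Person` type, its single field, and the sample payload are hypothetical; the only APIs assumed are this package's exported `Marshal`/`Unmarshal` and `encoding/json`'s `RawMessage` type. Because the package lives under `internal/`, this would only compile from code inside the library's own module.

```go
package main

import (
	stdjson "encoding/json" // only needed for the json.RawMessage type
	"fmt"

	msaljson "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
)

// Person is a hypothetical message type. AdditionalFields catches any JSON
// keys that have no matching struct field.
type Person struct {
	Name string `json:"name"`

	AdditionalFields map[string]interface{}
}

func main() {
	// "nickname" has no struct field, so Unmarshal stores it in
	// AdditionalFields as a json.RawMessage.
	var p Person
	if err := msaljson.Unmarshal([]byte(`{"name":"Ann","nickname":"A"}`), &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Name) // Ann
	nick := p.AdditionalFields["nickname"].(stdjson.RawMessage)
	fmt.Println(string(nick)) // "A"

	// Marshal writes the preserved key back out next to the known field.
	b, err := msaljson.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"Ann","nickname":"A"}
}
```

Unknown keys are kept as raw JSON, so their interpretation is deferred to whichever MSAL client reads the cache next, which appears to be the point of the shared cache contract described earlier.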
+
+We don't support types that can't custom unmarshal or have AdditionalFields
+in order to prevent future devs from forgetting that important field and
+generating bad return values.
+
+Support for root level objects of \*struct or struct simply acknowledges the
+fact that this is designed only for the purposes listed in the Introduction.
+Anything outside that (like encoding a lone number) should be done with the
+regular json package (as it will not have additional fields).
+
+We don't support a few things on json supported reference types and structs:
+- \*map: no need for pointers to maps
+- \*slice: no need for pointers to slices
+- any further pointers on struct after \*struct
+
+There should never be a need for this in Go.
+
+## Design
+
+## State Machines
+
+This uses state machine designs that are based upon the Rob Pike talk on
+lexers and parsers: https://www.youtube.com/watch?v=HxaD_trXwRE
+
+This is the most common pattern for state machines in Go and
+the model to follow closely when dealing with streaming
+processing of textual data.
+
+Our state machines are based on the type:
+```go
+type stateFn func() (stateFn, error)
+```
+
+The state machine itself is simply a struct that has methods that
+satisfy stateFn.
+
+Our state machines have a few standard calls:
+- run(): runs the state machine
+- start(): always the first stateFn to be called
+
+All state machines have the following logic:
+* run() is called
+* start() is called and returns the next stateFn or error
+* stateFn is called
+  - If the returned stateFn (next state) is non-nil, call it
+  - If the returned error is non-nil, run() returns the error
+  - If stateFn == nil and err == nil, run() returns nil
+
+## Supporting types
+
+Marshalling/Unmarshalling must support (within a top level struct):
+- struct
+- \*struct
+- []struct
+- []\*struct
+- []map[string]structContainer
+- [][]structContainer
+
+**Term note:** structContainer == type that has a struct or \*struct inside it
+
+We specifically do not support []interface or map[string]interface
+where the interface value would hold some value with a struct in it.
+
+Those will still marshal/unmarshal, but without support for
+AdditionalFields.
+
+## Marshalling
+
+The marshalling design will be based around a state machine design.
+
+The basic logic is as follows:
+
+* If struct has custom marshaller, call it and return
+* If struct has field "AdditionalFields", it must be a map[string]interface{}
+* If struct does not have "AdditionalFields", give an error
+* Get struct tag detailing json names to go names, create mapping
+* For each public field name
+  - Write field name out
+  - If field value is a struct, recursively call our state machine
+  - Otherwise, use the json.Encoder to write out the value
+
+## Unmarshalling
+
+The unmarshalling design is also based around a state machine design.
The +basic logic is as follows: + +* If struct has custom marhaller, call it +* If struct has field "AdditionalFields", it must be a map[string]interface{} +* Get struct tag detailing json names to go names, create mapping +* For each key found + - If key exists, + - If value is basic type, extract value into struct field using Decoder + - If value is struct type, recursively call statemachine + - If key doesn't exist, add it to AdditionalFields if it exists using Decoder diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go new file mode 100644 index 000000000..2238521f5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -0,0 +1,184 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package json provide functions for marshalling an unmarshalling types to JSON. These functions are meant to +// be utilized inside of structs that implement json.Unmarshaler and json.Marshaler interfaces. +// This package provides the additional functionality of writing fields that are not in the struct when marshalling +// to a field called AdditionalFields if that field exists and is a map[string]interface{}. +// When marshalling, if the struct has all the same prerequisites, it will uses the keys in AdditionalFields as +// extra fields. This package uses encoding/json underneath. +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +const addField = "AdditionalFields" +const ( + marshalJSON = "MarshalJSON" + unmarshalJSON = "UnmarshalJSON" +) + +var ( + leftBrace = []byte("{")[0] + rightBrace = []byte("}")[0] + comma = []byte(",")[0] + leftParen = []byte("[")[0] + rightParen = []byte("]")[0] +) + +var mapStrInterType = reflect.TypeOf(map[string]interface{}{}) + +// stateFn defines a state machine function. This will be used in all state +// machines in this package. +type stateFn func() (stateFn, error) + +// Marshal is used to marshal a type into its JSON representation. It +// wraps the stdlib calls in order to marshal a struct or *struct so +// that a field called "AdditionalFields" of type map[string]interface{} +// with "-" used inside struct tag `json:"-"` can be marshalled as if +// they were fields within the struct. +func Marshal(i interface{}) ([]byte, error) { + buff := bytes.Buffer{} + enc := json.NewEncoder(&buff) + enc.SetEscapeHTML(false) + enc.SetIndent("", "") + + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + err := marshalStruct(v, &buff, enc) + if err != nil { + return nil, err + } + return buff.Bytes(), nil +} + +// Unmarshal unmarshals a []byte representing JSON into i, which must be a *struct. In addition, if the struct has +// a field called AdditionalFields of type map[string]interface{}, JSON data representing fields not in the struct +// will be written as key/value pairs to AdditionalFields. +func Unmarshal(b []byte, i interface{}) error { + if len(b) == 0 { + return nil + } + + jdec := json.NewDecoder(bytes.NewBuffer(b)) + jdec.UseNumber() + return unmarshalStruct(jdec, i) +} + +// MarshalRaw marshals i into a json.RawMessage. If I cannot be marshalled, +// this will panic. This is exposed to help test AdditionalField values +// which are stored as json.RawMessage. 
+func MarshalRaw(i interface{}) json.RawMessage { + b, err := json.Marshal(i) + if err != nil { + panic(err) + } + return json.RawMessage(b) +} + +// isDelim simply tests to see if a json.Token is a delimeter. +func isDelim(got json.Token) bool { + switch got.(type) { + case json.Delim: + return true + } + return false +} + +// delimIs tests got to see if it is want. +func delimIs(got json.Token, want rune) bool { + switch v := got.(type) { + case json.Delim: + if v == json.Delim(want) { + return true + } + } + return false +} + +// hasMarshalJSON will determine if the value or a pointer to this value has +// the MarshalJSON method. +func hasMarshalJSON(v reflect.Value) bool { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + return false +} + +// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. +// This will panic if the method is not defined. +func callMarshalJSON(v reflect.Value) ([]byte, error) { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if v.CanAddr() { + v = v.Addr() + } + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) +} + +// hasUnmarshalJSON will determine if the value or a pointer to this value has +// the UnmarshalJSON method. +func hasUnmarshalJSON(v reflect.Value) bool { + // You can't unmarshal on a non-pointer type. + if v.Kind() != reflect.Ptr { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Unmarshaler) + return ok + } + + return false +} + +// hasOmitEmpty indicates if the field has instructed us to not output +// the field if omitempty is set on the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). +func hasOmitEmpty(tag string) bool { + sl := strings.Split(tag, ",") + for _, str := range sl { + if str == "omitempty" { + return true + } + } + return false +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go new file mode 100644 index 000000000..cef442f25 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go @@ -0,0 +1,333 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// unmarshalMap unmarshal's a map. 
+func unmarshalMap(dec *json.Decoder, m reflect.Value) error { + if m.Kind() != reflect.Ptr || m.Elem().Kind() != reflect.Map { + panic("unmarshalMap called on non-*map value") + } + mapValueType := m.Elem().Type().Elem() + walk := mapWalk{dec: dec, m: m, valueType: mapValueType} + if err := walk.run(); err != nil { + return err + } + return nil +} + +type mapWalk struct { + dec *json.Decoder + key string + m reflect.Value + valueType reflect.Type +} + +// run runs our decoder state machine. +func (m *mapWalk) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapWalk) start() (stateFn, error) { + // maps can have custom unmarshaler's. + if hasUnmarshalJSON(m.m) { + err := m.dec.Decode(m.m.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the map value is: + // *struct/struct/map/slice + // otherwise use standard decode + t, _ := m.valueBaseType() + switch t.Kind() { + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := m.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to JSON null. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + return m.next, nil + case reflect.Ptr: + return nil, fmt.Errorf("do not support maps with values of '**type' or '*reference") + } + + // This is a basic map type, so just use Decode(). + if err := m.dec.Decode(m.m.Interface()); err != nil { + return nil, err + } + + return nil, nil +} + +func (m *mapWalk) next() (stateFn, error) { + if m.dec.More() { + key, err := m.dec.Token() + if err != nil { + return nil, err + } + m.key = key.(string) + return m.storeValue, nil + } + // No more entries, so remove final }. + _, err := m.dec.Token() + if err != nil { + return nil, err + } + return nil, nil +} + +func (m *mapWalk) storeValue() (stateFn, error) { + v := m.valueType + for { + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + continue + case reflect.Struct: + return m.storeStruct, nil + case reflect.Map: + return m.storeMap, nil + case reflect.Slice: + return m.storeSlice, nil + } + return nil, fmt.Errorf("bug: mapWalk.storeValue() called on unsupported type: %v", v.Kind()) + } +} + +func (m *mapWalk) storeStruct() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalStruct(m.dec, v.Interface()); err != nil { + return nil, err + } + + if m.valueType.Kind() == reflect.Ptr { + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + return m.next, nil + } + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +func (m *mapWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(m.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(m.dec, ptr); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + + return m.next, nil +} + +func (m *mapWalk) storeSlice() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalSlice(m.dec, v); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... 
+func (m *mapWalk) valueBaseType() (reflect.Type, bool) { + ptr := false + v := m.valueType + if v.Kind() == reflect.Ptr { + ptr = true + v = v.Elem() + } + return v, ptr +} + +// unmarshalSlice unmarshal's the next value, which must be a slice, into +// ptrSlice, which must be a pointer to a slice. newValue() can be use to +// create the slice. +func unmarshalSlice(dec *json.Decoder, ptrSlice reflect.Value) error { + if ptrSlice.Kind() != reflect.Ptr || ptrSlice.Elem().Kind() != reflect.Slice { + panic("unmarshalSlice called on non-*[]slice value") + } + sliceValueType := ptrSlice.Elem().Type().Elem() + walk := sliceWalk{ + dec: dec, + s: ptrSlice, + valueType: sliceValueType, + } + if err := walk.run(); err != nil { + return err + } + + return nil +} + +type sliceWalk struct { + dec *json.Decoder + s reflect.Value // *[]slice + valueType reflect.Type +} + +// run runs our decoder state machine. +func (s *sliceWalk) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceWalk) start() (stateFn, error) { + // slices can have custom unmarshaler's. + if hasUnmarshalJSON(s.s) { + err := s.dec.Decode(s.s.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the slice value is: + // []*struct/[]struct/[]map/[]slice + // otherwise use standard decode + t := s.valueBaseType() + + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("cannot unmarshal into a ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := s.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to nil. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '[') { + return nil, fmt.Errorf("Unmarshal expected opening [, received %v", delim) + } + return s.next, nil + } + + if err := s.dec.Decode(s.s.Interface()); err != nil { + return nil, err + } + return nil, nil +} + +func (s *sliceWalk) next() (stateFn, error) { + if s.dec.More() { + return s.storeValue, nil + } + // Nothing left in the slice, remove closing ] + _, err := s.dec.Token() + return nil, err +} + +func (s *sliceWalk) storeValue() (stateFn, error) { + t := s.valueBaseType() + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("do not support 'pointer to pointer' or 'pointer to reference' types") + case reflect.Struct: + return s.storeStruct, nil + case reflect.Map: + return s.storeMap, nil + case reflect.Slice: + return s.storeSlice, nil + } + return nil, fmt.Errorf("bug: sliceWalk.storeValue() called on unsupported type: %v", t.Kind()) +} + +func (s *sliceWalk) storeStruct() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalStruct(s.dec, v.Interface()); err != nil { + return nil, err + } + + if s.valueType.Kind() == reflect.Ptr { + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + return s.next, nil + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + return s.next, nil +} + +func (s *sliceWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(s.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + + if err := unmarshalMap(s.dec, ptr); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + + return s.next, nil +} + +func (s *sliceWalk) storeSlice() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalSlice(s.dec, v); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + + 
return s.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... +func (s *sliceWalk) valueBaseType() reflect.Type { + v := s.valueType + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + return v +} + +// newValue() returns a new *type that represents type passed. +func newValue(valueType reflect.Type) reflect.Value { + if valueType.Kind() == reflect.Ptr { + return reflect.New(valueType.Elem()) + } + return reflect.New(valueType) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go new file mode 100644 index 000000000..df5dc6e11 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go @@ -0,0 +1,346 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +// marshalStruct takes in i, which must be a *struct or struct and marshals its content +// as JSON into buff (sometimes with writes to buff directly, sometimes via enc). +// This call is recursive for all fields of *struct or struct type. +func marshalStruct(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + // We only care about custom Marshalling a struct. + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: marshal() received a non *struct or struct, received type %T", v.Interface()) + } + + if hasMarshalJSON(v) { + b, err := callMarshalJSON(v) + if err != nil { + return err + } + buff.Write(b) + return nil + } + + t := v.Type() + + // If it has an AdditionalFields field make sure its the right type. + f := v.FieldByName(addField) + if f.Kind() != reflect.Invalid { + if f.Kind() != reflect.Map { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + if !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + } + + translator, err := findFields(v) + if err != nil { + return err + } + + buff.WriteByte(leftBrace) + for x := 0; x < v.NumField(); x++ { + field := v.Field(x) + + // We don't access private fields. + if unicode.IsLower(rune(t.Field(x).Name[0])) { + continue + } + + if t.Field(x).Name == addField { + if v.Field(x).Len() > 0 { + if err := writeAddFields(field.Interface(), buff, enc); err != nil { + return err + } + buff.WriteByte(comma) + } + continue + } + + // If they have omitempty set, we don't write out the field if + // it is the zero value. + if hasOmitEmpty(t.Field(x).Tag.Get("json")) { + if v.Field(x).IsZero() { + continue + } + } + + // Write out the field name part. + jsonName := translator.jsonName(t.Field(x).Name) + buff.WriteString(fmt.Sprintf("%q:", jsonName)) + + if field.Kind() == reflect.Ptr { + field = field.Elem() + } + + if err := marshalStructField(field, buff, enc); err != nil { + return err + } + } + + buff.Truncate(buff.Len() - 1) // Remove final comma + buff.WriteByte(rightBrace) + + return nil +} + +func marshalStructField(field reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + // Determine if we need a trailing comma. + defer buff.WriteByte(comma) + + switch field.Kind() { + // If it was a *struct or struct, we need to recursively all marshal(). 
+ case reflect.Struct: + if field.CanAddr() { + field = field.Addr() + } + return marshalStruct(field, buff, enc) + case reflect.Map: + return marshalMap(field, buff, enc) + case reflect.Slice: + return marshalSlice(field, buff, enc) + } + + // It is just a basic type, so encode it. + if err := enc.Encode(field.Interface()); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + return nil +} + +func marshalMap(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Map { + return fmt.Errorf("bug: marshalMap() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftBrace) + buff.WriteByte(rightBrace) + return nil + } + encoder := mapEncode{m: v, buff: buff, enc: enc} + return encoder.run() +} + +type mapEncode struct { + m reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. +func (m *mapEncode) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapEncode) start() (stateFn, error) { + if hasMarshalJSON(m.m) { + b, err := callMarshalJSON(m.m) + if err != nil { + return nil, err + } + m.buff.Write(b) + return nil, nil + } + + valueBaseType := m.m.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + m.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return m.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := m.enc.Encode(m.m.Interface()); err != nil { + return nil, err + } + m.buff.Truncate(m.buff.Len() - 1) // Remove Encode() added \n + return nil, nil +} + +func (m *mapEncode) encode() (stateFn, error) { + m.buff.WriteByte(leftBrace) + + iter := m.m.MapRange() + for iter.Next() { + // Write the key. + k := iter.Key() + m.buff.WriteString(fmt.Sprintf("%q:", k.String())) + + v := iter.Value() + switch m.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, m.buff, m.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", m.valueBaseType.Kind())) + } + m.buff.WriteByte(comma) + } + m.buff.Truncate(m.buff.Len() - 1) // Remove final comma + m.buff.WriteByte(rightBrace) + + return nil, nil +} + +func marshalSlice(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Slice { + return fmt.Errorf("bug: marshalSlice() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftParen) + buff.WriteByte(rightParen) + return nil + } + encoder := sliceEncode{s: v, buff: buff, enc: enc} + return encoder.run() +} + +type sliceEncode struct { + s reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. 
+func (s *sliceEncode) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceEncode) start() (stateFn, error) { + if hasMarshalJSON(s.s) { + b, err := callMarshalJSON(s.s) + if err != nil { + return nil, err + } + s.buff.Write(b) + return nil, nil + } + + valueBaseType := s.s.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + s.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return s.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := s.enc.Encode(s.s.Interface()); err != nil { + return nil, err + } + s.buff.Truncate(s.buff.Len() - 1) // Remove Encode added \n + + return nil, nil +} + +func (s *sliceEncode) encode() (stateFn, error) { + s.buff.WriteByte(leftParen) + for i := 0; i < s.s.Len(); i++ { + v := s.s.Index(i) + switch s.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, s.buff, s.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", s.valueBaseType.Kind())) + } + s.buff.WriteByte(comma) + } + s.buff.Truncate(s.buff.Len() - 1) // Remove final comma + s.buff.WriteByte(rightParen) + return nil, nil +} + +// writeAddFields writes the AdditionalFields struct field out to JSON as field +// values. i must be a map[string]interface{} or this will panic. +func writeAddFields(i interface{}, buff *bytes.Buffer, enc *json.Encoder) error { + m := i.(map[string]interface{}) + + x := 0 + for k, v := range m { + buff.WriteString(fmt.Sprintf("%q:", k)) + if err := enc.Encode(v); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + if x+1 != len(m) { + buff.WriteByte(comma) + } + x++ + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go new file mode 100644 index 000000000..07751544a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go @@ -0,0 +1,290 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +func unmarshalStruct(jdec *json.Decoder, i interface{}) error { + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + + if hasUnmarshalJSON(v) { + // Indicates that this type has a custom Unmarshaler. 
+ return jdec.Decode(v.Addr().Interface()) + } + + f := v.FieldByName(addField) + if f.Kind() == reflect.Invalid { + return fmt.Errorf("Unmarshal(%T) only supports structs that have the field AdditionalFields or implements json.Unmarshaler", i) + } + + if f.Kind() != reflect.Map || !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", i) + } + + dec := newDecoder(jdec, v) + return dec.run() +} + +type decoder struct { + dec *json.Decoder + value reflect.Value // This will be a reflect.Struct + translator translateFields + key string +} + +func newDecoder(dec *json.Decoder, value reflect.Value) *decoder { + return &decoder{value: value, dec: dec} +} + +// run runs our decoder state machine. +func (d *decoder) run() error { + var state = d.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +// start looks for our opening delimeter '{' and then transitions to looping through our fields. +func (d *decoder) start() (stateFn, error) { + var err error + d.translator, err = findFields(d.value) + if err != nil { + return nil, err + } + + delim, err := d.dec.Token() + if err != nil { + return nil, err + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + + return d.next, nil +} + +// next gets the next struct field name from the raw json or stops the machine if we get our closing }. +func (d *decoder) next() (stateFn, error) { + if !d.dec.More() { + // Remove the closing }. + if _, err := d.dec.Token(); err != nil { + return nil, err + } + return nil, nil + } + + key, err := d.dec.Token() + if err != nil { + return nil, err + } + + d.key = key.(string) + return d.storeValue, nil +} + +// storeValue takes the next value and stores it our struct. If the field can't be found +// in the struct, it pushes the operation to storeAdditional(). +func (d *decoder) storeValue() (stateFn, error) { + goName := d.translator.goName(d.key) + if goName == "" { + goName = d.key + } + + // We don't have the field in the struct, so it goes in AdditionalFields. + f := d.value.FieldByName(goName) + if f.Kind() == reflect.Invalid { + return d.storeAdditional, nil + } + + // Indicates that this type has a custom Unmarshaler. + if hasUnmarshalJSON(f) { + err := d.dec.Decode(f.Addr().Interface()) + if err != nil { + return nil, err + } + return d.next, nil + } + + t, isPtr, err := fieldBaseType(d.value, goName) + if err != nil { + return nil, fmt.Errorf("type(%s) had field(%s) %w", d.value.Type().Name(), goName, err) + } + + switch t.Kind() { + // We need to recursively call ourselves on any *struct or struct. + case reflect.Struct: + if isPtr { + if f.IsNil() { + f.Set(reflect.New(t)) + } + } else { + f = f.Addr() + } + if err := unmarshalStruct(d.dec, f.Interface()); err != nil { + return nil, err + } + return d.next, nil + case reflect.Map: + v := reflect.MakeMap(f.Type()) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + case reflect.Slice: + v := reflect.MakeSlice(f.Type(), 0, 0) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalSlice(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + } + + if !isPtr { + f = f.Addr() + } + + // For values that are pointers, we need them to be non-nil in order + // to decode into them. 
+ if f.IsNil() { + f.Set(reflect.New(t)) + } + + if err := d.dec.Decode(f.Interface()); err != nil { + return nil, err + } + + return d.next, nil +} + +// storeAdditional pushes the key/value into our .AdditionalFields map. +func (d *decoder) storeAdditional() (stateFn, error) { + rw := json.RawMessage{} + if err := d.dec.Decode(&rw); err != nil { + return nil, err + } + field := d.value.FieldByName(addField) + if field.IsNil() { + field.Set(reflect.MakeMap(field.Type())) + } + field.SetMapIndex(reflect.ValueOf(d.key), reflect.ValueOf(rw)) + return d.next, nil +} + +func fieldBaseType(v reflect.Value, fieldName string) (t reflect.Type, isPtr bool, err error) { + sf, ok := v.Type().FieldByName(fieldName) + if !ok { + return nil, false, fmt.Errorf("bug: fieldBaseType() lookup of field(%s) on type(%s): do not have field", fieldName, v.Type().Name()) + } + t = sf.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + isPtr = true + } + if t.Kind() == reflect.Ptr { + return nil, isPtr, fmt.Errorf("received pointer to pointer type, not supported") + } + return t, isPtr, nil +} + +type translateField struct { + jsonName string + goName string +} + +// translateFields is a list of translateFields with a handy lookup method. +type translateFields []translateField + +// goName loops through a list of fields looking for one contaning the jsonName and +// returning the goName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) goName(jsonName string) string { + for _, entry := range t { + if entry.jsonName == jsonName { + return entry.goName + } + } + return "" +} + +// jsonName loops through a list of fields looking for one contaning the goName and +// returning the jsonName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) jsonName(goName string) string { + for _, entry := range t { + if entry.goName == goName { + return entry.jsonName + } + } + return "" +} + +var umarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + +// findFields parses a struct and writes the field tags for lookup. It will return an error +// if any field has a type of *struct or struct that does not implement json.Marshaler. +func findFields(v reflect.Value) (translateFields, error) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("findFields received a %s type, expected *struct or struct", v.Type().Name()) + } + tfs := make([]translateField, 0, v.NumField()) + for i := 0; i < v.NumField(); i++ { + tf := translateField{ + goName: v.Type().Field(i).Name, + jsonName: parseTag(v.Type().Field(i).Tag.Get("json")), + } + switch tf.jsonName { + case "", "-": + tf.jsonName = tf.goName + } + tfs = append(tfs, tf) + + f := v.Field(i) + if f.Kind() == reflect.Ptr { + f = f.Elem() + } + if f.Kind() == reflect.Struct { + if f.Type().Implements(umarshalerType) { + return nil, fmt.Errorf("struct type %q which has field %q which "+ + "doesn't implement json.Unmarshaler", v.Type().Name(), v.Type().Field(i).Name) + } + } + } + return tfs, nil +} + +// parseTag just returns the first entry in the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). 
+func parseTag(tag string) string { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx] + } + return tag +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go new file mode 100644 index 000000000..a1c99621e --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package time provides for custom types to translate time from JSON and other formats +// into time.Time objects. +package time + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Unix provides a type that can marshal and unmarshal a string representation +// of the unix epoch into a time.Time object. +type Unix struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (u Unix) MarshalJSON() ([]byte, error) { + if u.T.IsZero() { + return []byte(""), nil + } + return []byte(fmt.Sprintf("%q", strconv.FormatInt(u.T.Unix(), 10))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (u *Unix) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + u.T = time.Unix(int64(i), 0) + return nil +} + +// DurationTime provides a type that can marshal and unmarshal a string representation +// of a duration from now into a time.Time object. +// Note: I'm not sure this is the best way to do this. What happens is we get a field +// called "expires_in" that represents the seconds from now that this expires. We +// turn that into a time we call .ExpiresOn. But maybe we should be recording +// when the token was received at .TokenRecieved and .ExpiresIn should remain as a duration. +// Then we could have a method called ExpiresOn(). Honestly, the whole thing is +// bad because the server doesn't return a concrete time. I think this is +// cleaner, but its not great either. +type DurationTime struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (d DurationTime) MarshalJSON() ([]byte, error) { + if d.T.IsZero() { + return []byte(""), nil + } + + dt := time.Until(d.T) + return []byte(fmt.Sprintf("%d", int64(dt*time.Second))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (d *DurationTime) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + d.T = time.Now().Add(time.Duration(i) * time.Second) + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go new file mode 100644 index 000000000..04236ff31 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package local contains a local HTTP server used with interactive authentication. 
+package local + +import ( + "context" + "fmt" + "net" + "net/http" + "strconv" + "strings" + "time" +) + +var okPage = []byte(` + + + + + Authentication Complete + + +

      Authentication complete. You can return to the application. Feel free to close this browser tab.

      + + +`) + +const failPage = ` + + + + + Authentication Failed + + +

      Authentication failed. You can return to the application. Feel free to close this browser tab.

      +

      Error details: error %s error_description: %s

      + + +` + +// Result is the result from the redirect. +type Result struct { + // Code is the code sent by the authority server. + Code string + // Err is set if there was an error. + Err error +} + +// Server is an HTTP server. +type Server struct { + // Addr is the address the server is listening on. + Addr string + resultCh chan Result + s *http.Server + reqState string +} + +// New creates a local HTTP server and starts it. +func New(reqState string, port int) (*Server, error) { + var l net.Listener + var err error + var portStr string + if port > 0 { + // use port provided by caller + l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + portStr = strconv.FormatInt(int64(port), 10) + } else { + // find a free port + for i := 0; i < 10; i++ { + l, err = net.Listen("tcp", "localhost:0") + if err != nil { + continue + } + addr := l.Addr().String() + portStr = addr[strings.LastIndex(addr, ":")+1:] + break + } + } + if err != nil { + return nil, err + } + + serv := &Server{ + Addr: fmt.Sprintf("http://localhost:%s", portStr), + s: &http.Server{Addr: "localhost:0", ReadHeaderTimeout: time.Second}, + reqState: reqState, + resultCh: make(chan Result, 1), + } + serv.s.Handler = http.HandlerFunc(serv.handler) + + if err := serv.start(l); err != nil { + return nil, err + } + + return serv, nil +} + +func (s *Server) start(l net.Listener) error { + go func() { + err := s.s.Serve(l) + if err != nil { + select { + case s.resultCh <- Result{Err: err}: + default: + } + } + }() + + return nil +} + +// Result gets the result of the redirect operation. Once a single result is returned, the server +// is shutdown. ctx deadline will be honored. +func (s *Server) Result(ctx context.Context) Result { + select { + case <-ctx.Done(): + return Result{Err: ctx.Err()} + case r := <-s.resultCh: + return r + } +} + +// Shutdown shuts down the server. +func (s *Server) Shutdown() { + // Note: You might get clever and think you can do this in handler() as a defer, you can't. + _ = s.s.Shutdown(context.Background()) +} + +func (s *Server) putResult(r Result) { + select { + case s.resultCh <- r: + default: + } +} + +func (s *Server) handler(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query() + + headerErr := q.Get("error") + if headerErr != "" { + desc := q.Get("error_description") + // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, + // change this to s.error() and make s.error() write the failPage instead of an error code. + _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) + s.putResult(Result{Err: fmt.Errorf(desc)}) + return + } + + respState := q.Get("state") + switch respState { + case s.reqState: + case "": + s.error(w, http.StatusInternalServerError, "server didn't send OAuth state") + return + default: + s.error(w, http.StatusInternalServerError, "mismatched OAuth state, req(%s), resp(%s)", s.reqState, respState) + return + } + + code := q.Get("code") + if code == "" { + s.error(w, http.StatusInternalServerError, "authorization code missing in query string") + return + } + + _, _ = w.Write(okPage) + s.putResult(Result{Code: code}) +} + +func (s *Server) error(w http.ResponseWriter, code int, str string, i ...interface{}) { + err := fmt.Errorf(str, i...) 
+ http.Error(w, err.Error(), code) + s.putResult(Result{Err: err}) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go new file mode 100644 index 000000000..ebd86e2ba --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -0,0 +1,353 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package oauth + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" + "github.com/google/uuid" +) + +// ResolveEndpointer contains the methods for resolving authority endpoints. +type ResolveEndpointer interface { + ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) +} + +// AccessTokens contains the methods for fetching tokens from different sources. +type AccessTokens interface { + DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (accesstokens.DeviceCodeResult, error) + FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (accesstokens.TokenResponse, error) + FromAuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) + FromRefreshToken(ctx context.Context, appType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken string) (accesstokens.TokenResponse, error) + FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (accesstokens.TokenResponse, error) + FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (accesstokens.TokenResponse, error) + FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (accesstokens.TokenResponse, error) + FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (accesstokens.TokenResponse, error) + FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult accesstokens.DeviceCodeResult) (accesstokens.TokenResponse, error) + FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (accesstokens.TokenResponse, error) +} + +// FetchAuthority will be implemented by authority.Authority. 
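Stepping back for a moment: the redirect server in the local package above is typically driven as in the following sketch. The state value and timeout are placeholders.

// Sketch only. Assumed imports: context, time, and the local package above.
func redirectSketch(reqState string) (string, error) {
	srv, err := local.New(reqState, 0) // port 0 lets the server pick a free localhost port
	if err != nil {
		return "", err
	}
	defer srv.Shutdown()

	// srv.Addr (e.g. "http://localhost:54321") becomes the redirect URI in the authorize request.

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	res := srv.Result(ctx) // blocks until the browser is redirected back or ctx expires
	if res.Err != nil {
		return "", res.Err
	}
	return res.Code, nil // the authorization code to exchange for tokens
}

The FetchAuthority and FetchWSTrust collaborator interfaces continue below.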
+type FetchAuthority interface { + UserRealm(context.Context, authority.AuthParams) (authority.UserRealm, error) + AADInstanceDiscovery(context.Context, authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// FetchWSTrust contains the methods for interacting with WSTrust endpoints. +type FetchWSTrust interface { + Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) + SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (wstrust.SamlTokenInfo, error) +} + +// Client provides tokens for various types of token requests. +type Client struct { + Resolver ResolveEndpointer + AccessTokens AccessTokens + Authority FetchAuthority + WSTrust FetchWSTrust +} + +// New is the constructor for Token. +func New(httpClient ops.HTTPClient) *Client { + r := ops.New(httpClient) + return &Client{ + Resolver: newAuthorityEndpoint(r), + AccessTokens: r.AccessTokens(), + Authority: r.Authority(), + WSTrust: r.WSTrust(), + } +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance. +func (t *Client) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + return t.Resolver.ResolveEndpoints(ctx, authorityInfo, userPrincipalName) +} + +// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint). +// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com). +func (t *Client) AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) { + return t.Authority.AADInstanceDiscovery(ctx, authorityInfo) +} + +// AuthCode returns a token based on an authorization code. +func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) { + if err := scopeError(req.AuthParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &req.AuthParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + tResp, err := t.AccessTokens.FromAuthCode(ctx, req) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("could not retrieve token from auth code: %w", err) + } + return tResp, nil +} + +// Credential acquires a token from the authority using a client credentials grant. 
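A sketch of driving the client-credentials grant through the Credential method that follows. The tenant, client ID, scope, and secret are placeholders, and *http.Client is assumed to satisfy ops.HTTPClient.

// Sketch only. Assumed imports: context, net/http, and the authority and accesstokens packages.
func clientCredentialSketch(ctx context.Context) (accesstokens.TokenResponse, error) {
	info, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/placeholder-tenant", true, false)
	if err != nil {
		return accesstokens.TokenResponse{}, err
	}

	ap := authority.NewAuthParams("placeholder-client-id", info)
	ap.Scopes = []string{"https://graph.microsoft.com/.default"}
	ap.AuthorizationType = authority.ATClientCredentials
	ap.IsConfidentialClient = true

	cl := New(http.DefaultClient) // *http.Client is assumed to satisfy ops.HTTPClient
	cred := &accesstokens.Credential{Secret: "placeholder-secret"}

	// With Secret set, Credential resolves endpoints and calls FromClientSecret;
	// with Cert/Key it signs an assertion and calls FromAssertion instead.
	return cl.Credential(ctx, ap, cred)
}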
+func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if cred.TokenProvider != nil { + now := time.Now() + scopes := make([]string, len(authParams.Scopes)) + copy(scopes, authParams.Scopes) + params := exported.TokenProviderParameters{ + Claims: authParams.Claims, + CorrelationID: uuid.New().String(), + Scopes: scopes, + TenantID: authParams.AuthorityInfo.Tenant, + } + tr, err := cred.TokenProvider(ctx, params) + if err != nil { + if len(scopes) == 0 { + err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{ + AccessToken: tr.AccessToken, + ExpiresOn: internalTime.DurationTime{ + T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), + }, + GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes}, + }, nil + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret) + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromAssertion(ctx, authParams, jwt) +} + +// Credential acquires a token from the authority using a client credentials grant. +func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret) + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + tr, err := t.AccessTokens.FromUserAssertionClientCertificate(ctx, authParams, authParams.UserAssertion, jwt) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil +} + +func (t *Client) Refresh(ctx context.Context, reqType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken accesstokens.RefreshToken) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + tr, err := t.AccessTokens.FromRefreshToken(ctx, reqType, authParams, cc, refreshToken.Secret) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil +} + +// UsernamePassword retrieves a token where a username and password is used. However, if this is +// a user realm of "Federated", this uses SAML tokens. If "Managed", uses normal username/password. 
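A matching sketch for the username/password flow implemented next; the account values are placeholders.

// Sketch only. Assumed imports: context and the authority package; cl is a *Client from New above.
func usernamePasswordSketch(ctx context.Context, cl *Client, info authority.Info) (accesstokens.TokenResponse, error) {
	ap := authority.NewAuthParams("placeholder-client-id", info)
	ap.Scopes = []string{"user.read"}
	ap.AuthorizationType = authority.ATUsernamePassword
	ap.Username = "user@example.com"
	ap.Password = "placeholder-password"

	// Managed realms go straight to FromUsernamePassword; Federated realms first fetch the
	// WS-Trust MEX document and exchange a SAML assertion, as the method below shows.
	return cl.UsernamePassword(ctx, ap)
}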
+func (t *Client) UsernamePassword(ctx context.Context, authParams authority.AuthParams) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + + if authParams.AuthorityInfo.AuthorityType == authority.ADFS { + if err := t.resolveEndpoint(ctx, &authParams, authParams.Username); err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + userRealm, err := t.Authority.UserRealm(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting user realm from authority: %w", err) + } + + switch userRealm.AccountType { + case authority.Federated: + mexDoc, err := t.WSTrust.Mex(ctx, userRealm.FederationMetadataURL) + if err != nil { + err = fmt.Errorf("problem getting mex doc from federated url(%s): %w", userRealm.FederationMetadataURL, err) + return accesstokens.TokenResponse{}, err + } + + saml, err := t.WSTrust.SAMLTokenInfo(ctx, authParams, userRealm.CloudAudienceURN, mexDoc.UsernamePasswordEndpoint) + if err != nil { + err = fmt.Errorf("problem getting SAML token info: %w", err) + return accesstokens.TokenResponse{}, err + } + tr, err := t.AccessTokens.FromSamlGrant(ctx, authParams, saml) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil + case authority.Managed: + if len(authParams.Scopes) == 0 { + err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + return accesstokens.TokenResponse{}, errors.New("unknown account type") +} + +// DeviceCode is the result of a call to Token.DeviceCode(). +type DeviceCode struct { + // Result is the device code result from the first call in the device code flow. This allows + // the caller to retrieve the displayed code that is used to authorize on the second device. + Result accesstokens.DeviceCodeResult + authParams authority.AuthParams + + accessTokens AccessTokens +} + +// Token returns a token AFTER the user uses the user code on the second device. This will block +// until either: (1) the code is input by the user and the service releases a token, (2) the token +// expires, (3) the Context passed to .DeviceCode() is cancelled or expires, (4) some other service +// error occurs. +func (d DeviceCode) Token(ctx context.Context) (accesstokens.TokenResponse, error) { + if d.accessTokens == nil { + return accesstokens.TokenResponse{}, fmt.Errorf("DeviceCode was either created outside its package or the creating method had an error. 
DeviceCode is not valid") + } + + var cancel context.CancelFunc + if deadline, ok := ctx.Deadline(); !ok || d.Result.ExpiresOn.Before(deadline) { + ctx, cancel = context.WithDeadline(ctx, d.Result.ExpiresOn) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + var interval = 50 * time.Millisecond + timer := time.NewTimer(interval) + defer timer.Stop() + + for { + timer.Reset(interval) + select { + case <-ctx.Done(): + return accesstokens.TokenResponse{}, ctx.Err() + case <-timer.C: + interval += interval * 2 + if interval > 5*time.Second { + interval = 5 * time.Second + } + } + + token, err := d.accessTokens.FromDeviceCodeResult(ctx, d.authParams, d.Result) + if err != nil && isWaitDeviceCodeErr(err) { + continue + } + return token, err // This handles if it was a non-wait error or success + } +} + +type deviceCodeError struct { + Error string `json:"error"` +} + +func isWaitDeviceCodeErr(err error) bool { + var c errors.CallErr + if !errors.As(err, &c) { + return false + } + if c.Resp.StatusCode != 400 { + return false + } + var dCErr deviceCodeError + defer c.Resp.Body.Close() + body, err := io.ReadAll(c.Resp.Body) + if err != nil { + return false + } + err = json.Unmarshal(body, &dCErr) + if err != nil { + return false + } + if dCErr.Error == "authorization_pending" || dCErr.Error == "slow_down" { + return true + } + return false +} + +// DeviceCode returns a DeviceCode object that can be used to get the code that must be entered on the second +// device and optionally the token once the code has been entered on the second device. +func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams) (DeviceCode, error) { + if err := scopeError(authParams); err != nil { + return DeviceCode{}, err + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return DeviceCode{}, err + } + + dcr, err := t.AccessTokens.DeviceCodeResult(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dcr, authParams: authParams, accessTokens: t.AccessTokens}, nil +} + +func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { + endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) + if err != nil { + return fmt.Errorf("unable to resolve an endpoint: %s", err) + } + authParams.Endpoints = endpoints + return nil +} + +// scopeError takes an authority.AuthParams and returns an error +// if len(AuthParams.Scope) == 0. +func scopeError(a authority.AuthParams) error { + // TODO(someone): we could look deeper at the message to determine if + // it's a scope error, but this is a good start. + /* + {error":"invalid_scope","error_description":"AADSTS1002012: The provided value for scope + openid offline_access profile is not valid. 
Client credential flows must have a scope value + with /.default suffixed to the resource identifier (application ID URI)...} + */ + if len(a.Scopes) == 0 { + return fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which is invalid") + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go new file mode 100644 index 000000000..fa6bb61c8 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -0,0 +1,451 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package accesstokens exposes a REST client for querying backend systems to get various types of +access tokens (oauth) for use in authentication. + +These calls are of type "application/x-www-form-urlencoded". This means we use url.Values to +represent arguments and then encode them into the POST body message. We receive JSON in +return for the requests. The request definition is defined in https://tools.ietf.org/html/rfc7521#section-4.2 . +*/ +package accesstokens + +import ( + "context" + "crypto" + + /* #nosec */ + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/golang-jwt/jwt/v4" + "github.com/google/uuid" +) + +const ( + grantType = "grant_type" + deviceCode = "device_code" + clientID = "client_id" + clientInfo = "client_info" + clientInfoVal = "1" + username = "username" + password = "password" +) + +//go:generate stringer -type=AppType + +// AppType is whether the authorization code flow is for a public or confidential client. +type AppType int8 + +const ( + // ATUnknown is the zero value when the type hasn't been set. + ATUnknown AppType = iota + // ATPublic indicates this if for the Public.Client. + ATPublic + // ATConfidential indicates this if for the Confidential.Client. + ATConfidential +) + +type urlFormCaller interface { + URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error +} + +// DeviceCodeResponse represents the HTTP response received from the device code endpoint +type DeviceCodeResponse struct { + authority.OAuthResponseBase + + UserCode string `json:"user_code"` + DeviceCode string `json:"device_code"` + VerificationURL string `json:"verification_url"` + ExpiresIn int `json:"expires_in"` + Interval int `json:"interval"` + Message string `json:"message"` + + AdditionalFields map[string]interface{} +} + +// Convert converts the DeviceCodeResponse to a DeviceCodeResult +func (dcr DeviceCodeResponse) Convert(clientID string, scopes []string) DeviceCodeResult { + expiresOn := time.Now().UTC().Add(time.Duration(dcr.ExpiresIn) * time.Second) + return NewDeviceCodeResult(dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, expiresOn, dcr.Interval, dcr.Message, clientID, scopes) +} + +// Credential represents the credential used in confidential client flows. 
This can be either +// a Secret or Cert/Key. +type Credential struct { + // Secret contains the credential secret if we are doing auth by secret. + Secret string + + // Cert is the public certificate, if we're authenticating by certificate. + Cert *x509.Certificate + // Key is the private key for signing, if we're authenticating by certificate. + Key crypto.PrivateKey + // X5c is the JWT assertion's x5c header value, required for SN/I authentication. + X5c []string + + // AssertionCallback is a function provided by the application, if we're authenticating by assertion. + AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error) + + // TokenProvider is a function provided by the application that implements custom authentication + // logic for a confidential client + TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error) +} + +// JWT gets the jwt assertion when the credential is not using a secret. +func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) { + if c.AssertionCallback != nil { + options := exported.AssertionRequestOptions{ + ClientID: authParams.ClientID, + TokenEndpoint: authParams.Endpoints.TokenEndpoint, + } + return c.AssertionCallback(ctx, options) + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "aud": authParams.Endpoints.TokenEndpoint, + "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), + "iss": authParams.ClientID, + "jti": uuid.New().String(), + "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), + "sub": authParams.ClientID, + }) + token.Header = map[string]interface{}{ + "alg": "RS256", + "typ": "JWT", + "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)), + } + + if authParams.SendX5C { + token.Header["x5c"] = c.X5c + } + + assertion, err := token.SignedString(c.Key) + if err != nil { + return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) + } + return assertion, nil +} + +// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. +// https://tools.ietf.org/html/rfc7517#section-4.8 +func thumbprint(cert *x509.Certificate) []byte { + /* #nosec */ + a := sha1.Sum(cert.Raw) + return a[:] +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm urlFormCaller + + testing bool +} + +// FromUsernamePassword uses a username and password to get an access token. +func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.Password) + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +// AuthCodeRequest stores the values required to request a token from the authority using an authorization code +type AuthCodeRequest struct { + AuthParams authority.AuthParams + Code string + CodeChallenge string + Credential *Credential + AppType AppType +} + +// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge.. 
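A sketch of the certificate variant of Credential described above: JWT signs an RS256 client assertion whose x5t header is the SHA-1 thumbprint computed by thumbprint. The certificate and key here are assumed to be supplied by the caller.

// Sketch only. Assumed imports: context, crypto, crypto/x509, and the authority package.
func certAssertionSketch(ctx context.Context, cert *x509.Certificate, key crypto.PrivateKey, ap authority.AuthParams) (string, error) {
	cred := &Credential{
		Cert: cert,
		Key:  key,
		// Setting X5c, together with AuthParams.SendX5C, adds the cert chain to the JWT header for SN/I auth.
	}
	// ap.Endpoints should already be resolved: the token endpoint becomes the "aud" claim.
	return cred.JWT(ctx, ap)
}

NewCodeChallengeRequest follows.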
+func NewCodeChallengeRequest(params authority.AuthParams, appType AppType, cc *Credential, code, challenge string) (AuthCodeRequest, error) { + if appType == ATUnknown { + return AuthCodeRequest{}, fmt.Errorf("bug: NewCodeChallengeRequest() called with AppType == ATUnknown") + } + return AuthCodeRequest{ + AuthParams: params, + AppType: appType, + Code: code, + CodeChallenge: challenge, + Credential: cc, + }, nil +} + +// FromAuthCode uses an authorization code to retrieve an access token. +func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenResponse, error) { + var qv url.Values + + switch req.AppType { + case ATUnknown: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == ATUnknown") + case ATConfidential: + var err error + if req.Credential == nil { + return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app") + } + qv, err = prepURLVals(ctx, req.Credential, req.AuthParams) + if err != nil { + return TokenResponse{}, err + } + case ATPublic: + qv = url.Values{} + default: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == %v, which we do not recongnize", req.AppType) + } + + qv.Set(grantType, grant.AuthCode) + qv.Set("code", req.Code) + qv.Set("code_verifier", req.CodeChallenge) + qv.Set("redirect_uri", req.AuthParams.Redirecturi) + qv.Set(clientID, req.AuthParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, req.AuthParams) + if err := addClaims(qv, req.AuthParams); err != nil { + return TokenResponse{}, err + } + + return c.doTokenResp(ctx, req.AuthParams, qv) +} + +// FromRefreshToken uses a refresh token (for refreshing credentials) to get a new access token. +func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParams authority.AuthParams, cc *Credential, refreshToken string) (TokenResponse, error) { + qv := url.Values{} + if appType == ATConfidential { + var err error + qv, err = prepURLVals(ctx, cc, authParams) + if err != nil { + return TokenResponse{}, err + } + } + if err := addClaims(qv, authParams); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.RefreshToken) + qv.Set(clientID, authParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("refresh_token", refreshToken) + addScopeQueryParam(qv, authParams) + + return c.doTokenResp(ctx, authParams, qv) +} + +// FromClientSecret uses a client's secret (aka password) to get a new token. 
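A sketch of exchanging an authorization code using the request builder above. The code and PKCE verifier are placeholders (note that the challenge argument is sent as code_verifier), and the Client is assumed to be wired with a Comm transport, as ops.New does for oauth.New earlier.

// Sketch only. Assumed import: context.
func authCodeSketch(ctx context.Context, c Client, ap authority.AuthParams) (TokenResponse, error) {
	req, err := NewCodeChallengeRequest(ap, ATPublic, nil, "placeholder-auth-code", "placeholder-pkce-verifier")
	if err != nil {
		return TokenResponse{}, err
	}
	// A public client needs no Credential; a confidential client would pass one and use ATConfidential.
	return c.FromAuthCode(ctx, req)
}

FromClientSecret follows.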
+func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_secret", clientSecret) + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromClientSecret(): %w", err) + } + return token, nil +} + +func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromAssertion(): %w", err) + } + return token, nil +} + +func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set(clientID, authParameters.ClientID) + qv.Set("client_secret", clientSecret) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return DeviceCodeResult{}, err + } + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + endpoint := strings.Replace(authParameters.Endpoints.TokenEndpoint, "token", "devicecode", -1) + + resp := DeviceCodeResponse{} + err := c.Comm.URLFormCall(ctx, endpoint, qv, &resp) + if err != nil { + return DeviceCodeResult{}, err + } + + return resp.Convert(authParameters.ClientID, authParameters.Scopes), nil +} + +func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.DeviceCode) + qv.Set(deviceCode, deviceCodeResult.DeviceCode) + qv.Set(clientID, 
authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("assertion", base64.StdEncoding.WithPadding(base64.StdPadding).EncodeToString([]byte(samlGrant.Assertion))) + addScopeQueryParam(qv, authParameters) + + switch samlGrant.AssertionType { + case grant.SAMLV1: + qv.Set(grantType, grant.SAMLV1) + case grant.SAMLV2: + qv.Set(grantType, grant.SAMLV2) + default: + return TokenResponse{}, fmt.Errorf("GetAccessTokenFromSamlGrant returned unknown SAML assertion type: %q", samlGrant.AssertionType) + } + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) { + resp := TokenResponse{} + err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp) + if err != nil { + return resp, err + } + resp.ComputeScope(authParams) + if c.testing { + return resp, nil + } + return resp, resp.Validate() +} + +// prepURLVals returns an url.Values that sets various key/values if we are doing secrets +// or JWT assertions. +func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) { + params := url.Values{} + if cc.Secret != "" { + params.Set("client_secret", cc.Secret) + return params, nil + } + + jwt, err := cc.JWT(ctx, authParams) + if err != nil { + return nil, err + } + params.Set("client_assertion", jwt) + params.Set("client_assertion_type", grant.ClientAssertion) + return params, nil +} + +// openid required to get an id token +// offline_access required to get a refresh token +// profile required to get the client_info field back +var detectDefaultScopes = map[string]bool{ + "openid": true, + "offline_access": true, + "profile": true, +} + +var defaultScopes = []string{"openid", "offline_access", "profile"} + +func AppendDefaultScopes(authParameters authority.AuthParams) []string { + scopes := make([]string, 0, len(authParameters.Scopes)+len(defaultScopes)) + for _, scope := range authParameters.Scopes { + s := strings.TrimSpace(scope) + if s == "" { + continue + } + if detectDefaultScopes[scope] { + continue + } + scopes = append(scopes, scope) + } + scopes = append(scopes, defaultScopes...) 
+ return scopes +} + +// addClaims adds client capabilities and claims from AuthParams to the given url.Values +func addClaims(v url.Values, ap authority.AuthParams) error { + claims, err := ap.MergeCapabilitiesAndClaims() + if err == nil && claims != "" { + v.Set("claims", claims) + } + return err +} + +func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) { + scopes := AppendDefaultScopes(authParameters) + queryParams.Set("scope", strings.Join(scopes, " ")) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go new file mode 100644 index 000000000..3bec4a67c --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=AppType"; DO NOT EDIT. + +package accesstokens + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATPublic-1] + _ = x[ATConfidential-2] +} + +const _AppType_name = "ATUnknownATPublicATConfidential" + +var _AppType_index = [...]uint8{0, 9, 17, 31} + +func (i AppType) String() string { + if i < 0 || i >= AppType(len(_AppType_index)-1) { + return "AppType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AppType_name[_AppType_index[i]:_AppType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go new file mode 100644 index 000000000..b3892bf3f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -0,0 +1,335 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package accesstokens + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// IDToken consists of all the information used to validate a user. +// https://docs.microsoft.com/azure/active-directory/develop/id-tokens . 
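A small worked example of the scope normalization AppendDefaultScopes performs above.

// Sketch only.
func scopesExample() []string {
	scopes := AppendDefaultScopes(authority.AuthParams{
		Scopes: []string{"https://graph.microsoft.com/.default", "openid", " "},
	})
	// Blank entries are dropped, scopes already in the reserved set are skipped, then the
	// defaults are appended:
	// []string{"https://graph.microsoft.com/.default", "openid", "offline_access", "profile"}
	return scopes
}

The IDToken type follows.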
+type IDToken struct { + PreferredUsername string `json:"preferred_username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + Oid string `json:"oid,omitempty"` + TenantID string `json:"tid,omitempty"` + Subject string `json:"sub,omitempty"` + UPN string `json:"upn,omitempty"` + Email string `json:"email,omitempty"` + AlternativeID string `json:"alternative_id,omitempty"` + Issuer string `json:"iss,omitempty"` + Audience string `json:"aud,omitempty"` + ExpirationTime int64 `json:"exp,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + RawToken string + + AdditionalFields map[string]interface{} +} + +var null = []byte("null") + +// UnmarshalJSON implements json.Unmarshaler. +func (i *IDToken) UnmarshalJSON(b []byte) error { + if bytes.Equal(null, b) { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. + type idToken2 IDToken + + jwt := strings.Trim(string(b), `"`) + jwtArr := strings.Split(jwt, ".") + if len(jwtArr) < 2 { + return errors.New("IDToken returned from server is invalid") + } + + jwtPart := jwtArr[1] + jwtDecoded, err := decodeJWT(jwtPart) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken, problem decoding JWT: %w", err) + } + + token := idToken2{} + err = json.Unmarshal(jwtDecoded, &token) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken: %w", err) + } + token.RawToken = jwt + + *i = IDToken(token) + return nil +} + +// IsZero indicates if the IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// LocalAccountID extracts an account's local account ID from an ID token. +func (i IDToken) LocalAccountID() string { + if i.Oid != "" { + return i.Oid + } + return i.Subject +} + +// jwtDecoder is provided to allow tests to provide their own. +var jwtDecoder = decodeJWT + +// ClientInfo is used to create a Home Account ID for an account. +type ClientInfo struct { + UID string `json:"uid"` + UTID string `json:"utid"` + + AdditionalFields map[string]interface{} +} + +// UnmarshalJSON implements json.Unmarshaler.s +func (c *ClientInfo) UnmarshalJSON(b []byte) error { + s := strings.Trim(string(b), `"`) + // Client info may be empty in some flows, e.g. certificate exchange. + if len(s) == 0 { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. 
+ type clientInfo2 ClientInfo + + raw, err := jwtDecoder(s) + if err != nil { + return fmt.Errorf("TokenResponse client_info field had JWT decode error: %w", err) + } + + var c2 clientInfo2 + + err = json.Unmarshal(raw, &c2) + if err != nil { + return fmt.Errorf("was unable to unmarshal decoded JWT in TokenRespone to ClientInfo: %w", err) + } + + *c = ClientInfo(c2) + return nil +} + +// HomeAccountID creates the home account ID. +func (c ClientInfo) HomeAccountID() string { + if c.UID == "" { + return "" + } else if c.UTID == "" { + return fmt.Sprintf("%s.%s", c.UID, c.UID) + } else { + return fmt.Sprintf("%s.%s", c.UID, c.UTID) + } +} + +// Scopes represents scopes in a TokenResponse. +type Scopes struct { + Slice []string +} + +// UnmarshalJSON implements json.Unmarshal. +func (s *Scopes) UnmarshalJSON(b []byte) error { + str := strings.Trim(string(b), `"`) + if len(str) == 0 { + return nil + } + sl := strings.Split(str, " ") + s.Slice = sl + return nil +} + +// TokenResponse is the information that is returned from a token endpoint during a token acquisition flow. +type TokenResponse struct { + authority.OAuthResponseBase + + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + FamilyID string `json:"foci"` + IDToken IDToken `json:"id_token"` + ClientInfo ClientInfo `json:"client_info"` + ExpiresOn internalTime.DurationTime `json:"expires_in"` + ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"` + GrantedScopes Scopes `json:"scope"` + DeclinedScopes []string // This is derived + + AdditionalFields map[string]interface{} + + scopesComputed bool +} + +// ComputeScope computes the final scopes based on what was granted by the server and +// what our AuthParams were from the authority server. Per OAuth spec, if no scopes are returned, the response should be treated as if all scopes were granted +// This behavior can be observed in client assertion flows, but can happen at any time, this check ensures we treat +// those special responses properly Link to spec: https://tools.ietf.org/html/rfc6749#section-3.3 +func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) { + if len(tr.GrantedScopes.Slice) == 0 { + tr.GrantedScopes = Scopes{Slice: authParams.Scopes} + } else { + tr.DeclinedScopes = findDeclinedScopes(authParams.Scopes, tr.GrantedScopes.Slice) + } + tr.scopesComputed = true +} + +// Validate validates the TokenResponse has basic valid values. It must be called +// after ComputeScopes() is called. 
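A worked example of the reconciliation ComputeScope performs above, which Validate (below) requires to have run.

// Sketch only.
func computeScopeExample() {
	ap := authority.AuthParams{Scopes: []string{"User.Read", "Mail.Read"}}

	tr := TokenResponse{GrantedScopes: Scopes{Slice: []string{"user.read"}}}
	tr.ComputeScope(ap)
	// tr.GrantedScopes.Slice == []string{"user.read"}  (the server's answer stands when non-empty)
	// tr.DeclinedScopes      == []string{"Mail.Read"}  (the comparison is case-insensitive)

	empty := TokenResponse{}
	empty.ComputeScope(ap)
	// An empty scope list in the response means "everything requested was granted":
	// empty.GrantedScopes.Slice == ap.Scopes
}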
+func (tr *TokenResponse) Validate() error { + if tr.Error != "" { + return fmt.Errorf("%s: %s", tr.Error, tr.ErrorDescription) + } + + if tr.AccessToken == "" { + return errors.New("response is missing access_token") + } + + if !tr.scopesComputed { + return fmt.Errorf("TokenResponse hasn't had ScopesComputed() called") + } + return nil +} + +func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string { + if authParams.AuthorizationType == authority.ATOnBehalfOf { + return authParams.AssertionHash() + } + if authParams.AuthorizationType == authority.ATClientCredentials { + return authParams.AppKey() + } + if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken { + return tr.ClientInfo.HomeAccountID() + } + return "" +} + +func findDeclinedScopes(requestedScopes []string, grantedScopes []string) []string { + declined := []string{} + grantedMap := map[string]bool{} + for _, s := range grantedScopes { + grantedMap[strings.ToLower(s)] = true + } + // Comparing the requested scopes with the granted scopes to see if there are any scopes that have been declined. + for _, r := range requestedScopes { + if !grantedMap[strings.ToLower(r)] { + declined = append(declined, r) + } + } + return declined +} + +// decodeJWT decodes a JWT and converts it to a byte array representing a JSON object +// JWT has headers and payload base64url encoded without padding +// https://tools.ietf.org/html/rfc7519#section-3 and +// https://tools.ietf.org/html/rfc7515#section-2 +func decodeJWT(data string) ([]byte, error) { + // https://tools.ietf.org/html/rfc7515#appendix-C + return base64.RawURLEncoding.DecodeString(data) +} + +// RefreshToken is the JSON representation of a MSAL refresh token for encoding to storage. +type RefreshToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + FamilyID string `json:"family_id,omitempty"` + Secret string `json:"secret,omitempty"` + Realm string `json:"realm,omitempty"` + Target string `json:"target,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewRefreshToken is the constructor for RefreshToken. +func NewRefreshToken(homeID, env, clientID, refreshToken, familyID string) RefreshToken { + return RefreshToken{ + HomeAccountID: homeID, + Environment: env, + CredentialType: "RefreshToken", + ClientID: clientID, + FamilyID: familyID, + Secret: refreshToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (rt RefreshToken) Key() string { + var fourth = rt.FamilyID + if fourth == "" { + fourth = rt.ClientID + } + + return strings.Join( + []string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth}, + shared.CacheKeySeparator, + ) +} + +func (rt RefreshToken) GetSecret() string { + return rt.Secret +} + +// DeviceCodeResult stores the response from the STS device code endpoint. +type DeviceCodeResult struct { + // UserCode is the code the user needs to provide when authentication at the verification URI. + UserCode string + // DeviceCode is the code used in the access token request. + DeviceCode string + // VerificationURL is the the URL where user can authenticate. + VerificationURL string + // ExpiresOn is the expiration time of device code in seconds. 
+ ExpiresOn time.Time + // Interval is the interval at which the STS should be polled at. + Interval int + // Message is the message which should be displayed to the user. + Message string + // ClientID is the UUID issued by the authorization server for your application. + ClientID string + // Scopes is the OpenID scopes used to request access a protected API. + Scopes []string +} + +// NewDeviceCodeResult creates a DeviceCodeResult instance. +func NewDeviceCodeResult(userCode, deviceCode, verificationURL string, expiresOn time.Time, interval int, message, clientID string, scopes []string) DeviceCodeResult { + return DeviceCodeResult{userCode, deviceCode, verificationURL, expiresOn, interval, message, clientID, scopes} +} + +func (dcr DeviceCodeResult) String() string { + return fmt.Sprintf("UserCode: (%v)\nDeviceCode: (%v)\nURL: (%v)\nMessage: (%v)\n", dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, dcr.Message) + +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go new file mode 100644 index 000000000..7b2ccb4f5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -0,0 +1,552 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package authority + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" + + "github.com/google/uuid" +) + +const ( + authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" + instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" + regionName = "REGION_NAME" + defaultAPIVersion = "2021-10-01" + imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion + autoDetectRegion = "TryAutoDetect" +) + +// These are various hosts that host AAD Instance discovery endpoints. +const ( + defaultHost = "login.microsoftonline.com" + loginMicrosoft = "login.microsoft.com" + loginWindows = "login.windows.net" + loginSTSWindows = "sts.windows.net" + loginMicrosoftOnline = defaultHost +) + +// jsonCaller is an interface that allows us to mock the JSONCall method. +type jsonCaller interface { + JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error +} + +var aadTrustedHostList = map[string]bool{ + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.chinacloudapi.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide + "login.cloudgovapi.us": true, // Microsoft Azure US Government +} + +// TrustedHost checks if an AAD host is trusted/valid. +func TrustedHost(host string) bool { + if _, ok := aadTrustedHostList[host]; ok { + return true + } + return false +} + +// OAuthResponseBase is the base JSON return message for an OAuth call. +// This is embedded in other calls to get the base fields from every response. 
+type OAuthResponseBase struct { + Error string `json:"error"` + SubError string `json:"suberror"` + ErrorDescription string `json:"error_description"` + ErrorCodes []int `json:"error_codes"` + CorrelationID string `json:"correlation_id"` + Claims string `json:"claims"` +} + +// TenantDiscoveryResponse is the tenant endpoints from the OpenID configuration endpoint. +type TenantDiscoveryResponse struct { + OAuthResponseBase + + AuthorizationEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` + Issuer string `json:"issuer"` + + AdditionalFields map[string]interface{} +} + +// Validate validates that the response had the correct values required. +func (r *TenantDiscoveryResponse) Validate() error { + switch "" { + case r.AuthorizationEndpoint: + return errors.New("TenantDiscoveryResponse: authorize endpoint was not found in the openid configuration") + case r.TokenEndpoint: + return errors.New("TenantDiscoveryResponse: token endpoint was not found in the openid configuration") + case r.Issuer: + return errors.New("TenantDiscoveryResponse: issuer was not found in the openid configuration") + } + return nil +} + +type InstanceDiscoveryMetadata struct { + PreferredNetwork string `json:"preferred_network"` + PreferredCache string `json:"preferred_cache"` + Aliases []string `json:"aliases"` + + AdditionalFields map[string]interface{} +} + +type InstanceDiscoveryResponse struct { + TenantDiscoveryEndpoint string `json:"tenant_discovery_endpoint"` + Metadata []InstanceDiscoveryMetadata `json:"metadata"` + + AdditionalFields map[string]interface{} +} + +//go:generate stringer -type=AuthorizeType + +// AuthorizeType represents the type of token flow. +type AuthorizeType int + +// These are all the types of token flows. +const ( + ATUnknown AuthorizeType = iota + ATUsernamePassword + ATWindowsIntegrated + ATAuthCode + ATInteractive + ATClientCredentials + ATDeviceCode + ATRefreshToken + AccountByID + ATOnBehalfOf +) + +// These are all authority types +const ( + AAD = "MSSTS" + ADFS = "ADFS" +) + +// AuthParams represents the parameters used for authorization for token acquisition. +type AuthParams struct { + AuthorityInfo Info + CorrelationID string + Endpoints Endpoints + ClientID string + // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow). + Redirecturi string + HomeAccountID string + // Username is the user-name portion for username/password auth flow. + Username string + // Password is the password portion for username/password auth flow. + Password string + // Scopes is the list of scopes the user consents to. + Scopes []string + // AuthorizationType specifies the auth flow being used. + AuthorizationType AuthorizeType + // State is a random value used to prevent cross-site request forgery attacks. + State string + // CodeChallenge is derived from a code verifier and is sent in the auth request. + CodeChallenge string + // CodeChallengeMethod describes the method used to create the CodeChallenge. + CodeChallengeMethod string + // Prompt specifies the user prompt type during interactive auth. + Prompt string + // IsConfidentialClient specifies if it is a confidential client. + IsConfidentialClient bool + // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS. + SendX5C bool + // UserAssertion is the access token used to acquire token on behalf of user + UserAssertion string + // Capabilities the client will include with each token request, for example "CP1". 
+ // Call [NewClientCapabilities] to construct a value for this field. + Capabilities ClientCapabilities + // Claims required for an access token to satisfy a conditional access policy + Claims string + // KnownAuthorityHosts don't require metadata discovery because they're known to the user + KnownAuthorityHosts []string + // LoginHint is a username with which to pre-populate account selection during interactive auth + LoginHint string + // DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page + DomainHint string +} + +// NewAuthParams creates an authorization parameters object. +func NewAuthParams(clientID string, authorityInfo Info) AuthParams { + return AuthParams{ + ClientID: clientID, + AuthorityInfo: authorityInfo, + CorrelationID: uuid.New().String(), + } +} + +// WithTenant returns a copy of the AuthParams having the specified tenant ID. If the given +// ID is empty, the copy is identical to the original. This function returns an error in +// several cases: +// - ID isn't specific (for example, it's "common") +// - ID is non-empty and the authority doesn't support tenants (for example, it's an ADFS authority) +// - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint +// - the resulting authority URL is invalid +func (p AuthParams) WithTenant(ID string) (AuthParams, error) { + switch ID { + case "", p.AuthorityInfo.Tenant: + // keep the default tenant because the caller didn't override it + return p, nil + case "common", "consumers", "organizations": + if p.AuthorityInfo.AuthorityType == AAD { + return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID) + } + // else we'll return a better error below + } + if p.AuthorityInfo.AuthorityType != AAD { + return p, errors.New("the authority doesn't support tenants") + } + if p.AuthorityInfo.Tenant == "consumers" { + return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + } + authority := "https://" + path.Join(p.AuthorityInfo.Host, ID) + info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled) + if err == nil { + info.Region = p.AuthorityInfo.Region + p.AuthorityInfo = info + } + return p, err +} + +// MergeCapabilitiesAndClaims combines client capabilities and challenge claims into a value suitable for an authentication request's "claims" parameter. +func (p AuthParams) MergeCapabilitiesAndClaims() (string, error) { + claims := p.Claims + if len(p.Capabilities.asMap) > 0 { + if claims == "" { + // without claims the result is simply the capabilities + return p.Capabilities.asJSON, nil + } + // Otherwise, merge claims and capabilties into a single JSON object. + // We handle the claims challenge as a map because we don't know its structure. + var challenge map[string]any + if err := json.Unmarshal([]byte(claims), &challenge); err != nil { + return "", fmt.Errorf(`claims must be JSON. Are they base64 encoded? json.Unmarshal returned "%v"`, err) + } + if err := merge(p.Capabilities.asMap, challenge); err != nil { + return "", err + } + b, err := json.Marshal(challenge) + if err != nil { + return "", err + } + claims = string(b) + } + return claims, nil +} + +// merges a into b without overwriting b's values. Returns an error when a and b share a key for which either has a non-object value. 
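A worked example of MergeCapabilitiesAndClaims above; the capability and claims challenge are illustrative, and the recursive merge helper it relies on follows.

// Sketch only.
func mergeExample() (string, error) {
	caps, err := NewClientCapabilities([]string{"CP1"})
	if err != nil {
		return "", err
	}
	p := AuthParams{
		Capabilities: caps,
		Claims:       `{"access_token":{"nbf":{"essential":true,"value":"1700000000"}}}`,
	}
	// The result combines both objects under "access_token", roughly:
	// {"access_token":{"nbf":{"essential":true,"value":"1700000000"},"xms_cc":{"values":["CP1"]}}}
	return p.MergeCapabilitiesAndClaims()
}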
+func merge(a, b map[string]any) error { + for k, av := range a { + if bv, ok := b[k]; !ok { + // b doesn't contain this key => simply set it to a's value + b[k] = av + } else { + // b does contain this key => recursively merge a[k] into b[k], provided both are maps. If a[k] or b[k] isn't + // a map, return an error because merging would overwrite some value in b. Errors shouldn't occur in practice + // because the challenge will be from AAD, which knows the capabilities format. + if A, ok := av.(map[string]any); ok { + if B, ok := bv.(map[string]any); ok { + return merge(A, B) + } else { + // b[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } else { + // a[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } + } + return nil +} + +// ClientCapabilities stores capabilities in the formats used by AuthParams.MergeCapabilitiesAndClaims. +// [NewClientCapabilities] precomputes these representations because capabilities are static for the +// lifetime of a client and are included with every authentication request i.e., these computations +// always have the same result and would otherwise have to be repeated for every request. +type ClientCapabilities struct { + // asJSON is for the common case: adding the capabilities to an auth request with no challenge claims + asJSON string + // asMap is for merging the capabilities with challenge claims + asMap map[string]any +} + +func NewClientCapabilities(capabilities []string) (ClientCapabilities, error) { + c := ClientCapabilities{} + var err error + if len(capabilities) > 0 { + cpbs := make([]string, len(capabilities)) + for i := 0; i < len(cpbs); i++ { + cpbs[i] = fmt.Sprintf(`"%s"`, capabilities[i]) + } + c.asJSON = fmt.Sprintf(`{"access_token":{"xms_cc":{"values":[%s]}}}`, strings.Join(cpbs, ",")) + // note our JSON is valid but we can't stop users breaking it with garbage like "}" + err = json.Unmarshal([]byte(c.asJSON), &c.asMap) + } + return c, err +} + +// Info consists of information about the authority. +type Info struct { + Host string + CanonicalAuthorityURI string + AuthorityType string + UserRealmURIPrefix string + ValidateAuthority bool + Tenant string + Region string + InstanceDiscoveryDisabled bool +} + +func firstPathSegment(u *url.URL) (string, error) { + pathParts := strings.Split(u.EscapedPath(), "/") + if len(pathParts) >= 2 { + return pathParts[1], nil + } + + return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) +} + +// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. 
+func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) { + u, err := url.Parse(strings.ToLower(authority)) + if err != nil || u.Scheme != "https" { + return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) + } + + tenant, err := firstPathSegment(u) + if err != nil { + return Info{}, err + } + authorityType := AAD + if tenant == "adfs" { + authorityType = ADFS + } + + // u.Host includes the port, if any, which is required for private cloud deployments + return Info{ + Host: u.Host, + CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant), + AuthorityType: authorityType, + UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), + ValidateAuthority: validateAuthority, + Tenant: tenant, + InstanceDiscoveryDisabled: instanceDiscoveryDisabled, + }, nil +} + +// Endpoints consists of the endpoints from the tenant discovery response. +type Endpoints struct { + AuthorizationEndpoint string + TokenEndpoint string + selfSignedJwtAudience string + authorityHost string +} + +// NewEndpoints creates an Endpoints object. +func NewEndpoints(authorizationEndpoint string, tokenEndpoint string, selfSignedJwtAudience string, authorityHost string) Endpoints { + return Endpoints{authorizationEndpoint, tokenEndpoint, selfSignedJwtAudience, authorityHost} +} + +// UserRealmAccountType refers to the type of user realm. +type UserRealmAccountType string + +// These are the different types of user realms. +const ( + Unknown UserRealmAccountType = "" + Federated UserRealmAccountType = "Federated" + Managed UserRealmAccountType = "Managed" +) + +// UserRealm is used for the username password request to determine user type +type UserRealm struct { + AccountType UserRealmAccountType `json:"account_type"` + DomainName string `json:"domain_name"` + CloudInstanceName string `json:"cloud_instance_name"` + CloudAudienceURN string `json:"cloud_audience_urn"` + + // required if accountType is Federated + FederationProtocol string `json:"federation_protocol"` + FederationMetadataURL string `json:"federation_metadata_url"` + + AdditionalFields map[string]interface{} +} + +func (u UserRealm) validate() error { + switch "" { + case string(u.AccountType): + return errors.New("the account type (Federated or Managed) is missing") + case u.DomainName: + return errors.New("domain name of user realm is missing") + case u.CloudInstanceName: + return errors.New("cloud instance name of user realm is missing") + case u.CloudAudienceURN: + return errors.New("cloud Instance URN is missing") + } + + if u.AccountType == Federated { + switch "" { + case u.FederationProtocol: + return errors.New("federation protocol of user realm is missing") + case u.FederationMetadataURL: + return errors.New("federation metadata URL of user realm is missing") + } + } + return nil +} + +// Client represents the REST calls to authority backends. +type Client struct { + // Comm provides the HTTP transport client. 
+ Comm jsonCaller // *comm.Client +} + +func (c Client) UserRealm(ctx context.Context, authParams AuthParams) (UserRealm, error) { + endpoint := fmt.Sprintf("https://%s/common/UserRealm/%s", authParams.Endpoints.authorityHost, url.PathEscape(authParams.Username)) + qv := url.Values{ + "api-version": []string{"1.0"}, + } + + resp := UserRealm{} + err := c.Comm.JSONCall( + ctx, + endpoint, + http.Header{"client-request-id": []string{authParams.CorrelationID}}, + qv, + nil, + &resp, + ) + if err != nil { + return resp, err + } + + return resp, resp.validate() +} + +func (c Client) GetTenantDiscoveryResponse(ctx context.Context, openIDConfigurationEndpoint string) (TenantDiscoveryResponse, error) { + resp := TenantDiscoveryResponse{} + err := c.Comm.JSONCall( + ctx, + openIDConfigurationEndpoint, + http.Header{}, + nil, + nil, + &resp, + ) + + return resp, err +} + +// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint). +// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com). +func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (InstanceDiscoveryResponse, error) { + region := "" + var err error + resp := InstanceDiscoveryResponse{} + if authorityInfo.Region != "" && authorityInfo.Region != autoDetectRegion { + region = authorityInfo.Region + } else if authorityInfo.Region == autoDetectRegion { + region = detectRegion(ctx) + } + if region != "" { + environment := authorityInfo.Host + switch environment { + case loginMicrosoft, loginWindows, loginSTSWindows, defaultHost: + environment = loginMicrosoft + } + + resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant) + metadata := InstanceDiscoveryMetadata{ + PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host), + PreferredCache: authorityInfo.Host, + Aliases: []string{fmt.Sprintf("%v.%v", region, authorityInfo.Host), authorityInfo.Host}, + } + resp.Metadata = []InstanceDiscoveryMetadata{metadata} + } else { + qv := url.Values{} + qv.Set("api-version", "1.1") + qv.Set("authorization_endpoint", fmt.Sprintf(authorizationEndpoint, authorityInfo.Host, authorityInfo.Tenant)) + + discoveryHost := defaultHost + if TrustedHost(authorityInfo.Host) { + discoveryHost = authorityInfo.Host + } + + endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost) + err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) + } + return resp, err +} + +func detectRegion(ctx context.Context) string { + region := os.Getenv(regionName) + if region != "" { + region = strings.ReplaceAll(region, " ", "") + return strings.ToLower(region) + } + // HTTP call to IMDS endpoint to get region + // Refer : https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FPinAuthToRegion%2FAAD%20SDK%20Proposal%20to%20Pin%20Auth%20to%20region.md&_a=preview&version=GBdev + // Set a 2 second timeout for this http client which only does calls to IMDS endpoint + client := http.Client{ + Timeout: time.Duration(2 * time.Second), + } + req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req.Header.Set("Metadata", "true") + resp, err := client.Do(req) + // If the request times out or there is an error, it is retried once + if err != nil || resp.StatusCode != 200 { + resp, err = client.Do(req) + if err != nil || resp.StatusCode != 200 { + return "" + } + } + defer resp.Body.Close() + response, err := io.ReadAll(resp.Body) + if err != nil { + 
return "" + } + return string(response) +} + +func (a *AuthParams) CacheKey(isAppCache bool) string { + if a.AuthorizationType == ATOnBehalfOf { + return a.AssertionHash() + } + if a.AuthorizationType == ATClientCredentials || isAppCache { + return a.AppKey() + } + if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID { + return a.HomeAccountID + } + return "" +} +func (a *AuthParams) AssertionHash() string { + hasher := sha256.New() + // Per documentation this never returns an error : https://pkg.go.dev/hash#pkg-types + _, _ = hasher.Write([]byte(a.UserAssertion)) + sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + return sha +} + +func (a *AuthParams) AppKey() string { + if a.AuthorityInfo.Tenant != "" { + return fmt.Sprintf("%s_%s_AppTokenCache", a.ClientID, a.AuthorityInfo.Tenant) + } + return fmt.Sprintf("%s__AppTokenCache", a.ClientID) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go new file mode 100644 index 000000000..10039773b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=AuthorizeType"; DO NOT EDIT. + +package authority + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATUsernamePassword-1] + _ = x[ATWindowsIntegrated-2] + _ = x[ATAuthCode-3] + _ = x[ATInteractive-4] + _ = x[ATClientCredentials-5] + _ = x[ATDeviceCode-6] + _ = x[ATRefreshToken-7] +} + +const _AuthorizeType_name = "ATUnknownATUsernamePasswordATWindowsIntegratedATAuthCodeATInteractiveATClientCredentialsATDeviceCodeATRefreshToken" + +var _AuthorizeType_index = [...]uint8{0, 9, 27, 46, 56, 69, 88, 100, 114} + +func (i AuthorizeType) String() string { + if i < 0 || i >= AuthorizeType(len(_AuthorizeType_index)-1) { + return "AuthorizeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AuthorizeType_name[_AuthorizeType_index[i]:_AuthorizeType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go new file mode 100644 index 000000000..7d9ec7cd3 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -0,0 +1,320 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package comm provides helpers for communicating with HTTP backends. +package comm + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "runtime" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" + "github.com/google/uuid" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. 
+type HTTPClient interface { + // Do sends an HTTP request and returns an HTTP response. + Do(req *http.Request) (*http.Response, error) + + // CloseIdleConnections closes any idle connections in a "keep-alive" state. + CloseIdleConnections() +} + +// Client provides a wrapper to our *http.Client that handles compression and serialization needs. +type Client struct { + client HTTPClient +} + +// New returns a new Client object. +func New(httpClient HTTPClient) *Client { + if httpClient == nil { + panic("http.Client cannot == nil") + } + + return &Client{client: httpClient} +} + +// JSONCall connects to the REST endpoint passing the HTTP query values, headers and JSON conversion +// of body in the HTTP body. It automatically handles compression and decompression with gzip. The response is JSON +// unmarshalled into resp. resp must be a pointer to a struct. If the body struct contains a field called +// "AdditionalFields" we use a custom marshal/unmarshal engine. +func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error { + if qv == nil { + qv = url.Values{} + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + // Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute. + var marshal = json.Marshal + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + marshal = customJSON.Marshal + unmarshal = customJSON.Unmarshal + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + addStdHeaders(headers) + + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if body != nil { + // Note: In case your wondering why we are not gzip encoding.... + // I'm not sure if these various services support gzip on send. + headers.Add("Content-Type", "application/json; charset=utf-8") + data, err := marshal(body) + if err != nil { + return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err) + } + req.Body = io.NopCloser(bytes.NewBuffer(data)) + req.Method = http.MethodPost + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data)) + } + } + return nil +} + +// XMLCall connects to an endpoint and decodes the XML response into resp. This is used when +// sending application/xml . If sending XML via SOAP, use SOAPCall(). +func (c *Client) XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error { + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/xml; charset=utf-8") // This was not set in he original Mex(), but... + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, "", resp) +} + +// SOAPCall returns the SOAP message given an endpoint, action, body of the request and the response object to marshal into. 
+func (c *Client) SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error { + if body == "" { + return fmt.Errorf("cannot make a SOAP call with body set to empty string") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/soap+xml; charset=utf-8") + headers.Set("SOAPAction", action) + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, body, resp) +} + +// xmlCall sends an XML in body and decodes into resp. This simply does the transport and relies on +// an upper level call to set things such as SOAP parameters and Content-Type, if required. +func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, body string, resp interface{}) error { + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if len(body) > 0 { + req.Method = http.MethodPost + req.Body = io.NopCloser(strings.NewReader(body)) + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + return xml.Unmarshal(data, resp) +} + +// URLFormCall is used to make a call where we need to send application/x-www-form-urlencoded data +// to the backend and receive JSON back. qv will be encoded into the request body. +func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error { + if len(qv) == 0 { + return fmt.Errorf("URLFormCall() requires qv to have non-zero length") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + + headers := http.Header{} + headers.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + addStdHeaders(headers) + + enc := qv.Encode() + + req := &http.Request{ + Method: http.MethodPost, + URL: u, + Header: headers, + ContentLength: int64(len(enc)), + Body: io.NopCloser(strings.NewReader(enc)), + GetBody: func() (io.ReadCloser, error) { + return io.NopCloser(strings.NewReader(enc)), nil + }, + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + unmarshal = customJSON.Unmarshal + } + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data)) + } + } + return nil +} + +// do makes the HTTP call to the server and returns the contents of the body. 
+func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) { + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 30*time.Second) + defer cancel() + } + req = req.WithContext(ctx) + + reply, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("server response error:\n %w", err) + } + defer reply.Body.Close() + + data, err := c.readBody(reply) + if err != nil { + return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err) + } + reply.Body = io.NopCloser(bytes.NewBuffer(data)) + + // NOTE: This doesn't happen immediately after the call so that we can get an error message + // from the server and include it in our error. + switch reply.StatusCode { + case 200, 201: + default: + sd := strings.TrimSpace(string(data)) + if sd != "" { + // We probably have the error in the body. + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, sd), + } + } + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, reply.StatusCode), + } + } + + return data, nil +} + +// checkResp checks a response object o make sure it is a pointer to a struct. +func (c *Client) checkResp(v reflect.Value) error { + if v.Kind() != reflect.Ptr { + return fmt.Errorf("bug: resp argument must a *struct, was %T", v.Interface()) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface()) + } + return nil +} + +// readBody reads the body out of an *http.Response. It supports gzip encoded responses. +func (c *Client) readBody(resp *http.Response) ([]byte, error) { + var reader io.Reader = resp.Body + switch resp.Header.Get("Content-Encoding") { + case "": + // Do nothing + case "gzip": + reader = gzipDecompress(resp.Body) + default: + return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s", resp.Header.Get("Content-Encoding")) + } + return io.ReadAll(reader) +} + +var testID string + +// addStdHeaders adds the standard headers we use on all calls. +func addStdHeaders(headers http.Header) http.Header { + headers.Set("Accept-Encoding", "gzip") + // So that I can have a static id for tests. + if testID != "" { + headers.Set("client-request-id", testID) + headers.Set("Return-Client-Request-Id", "false") + } else { + headers.Set("client-request-id", uuid.New().String()) + headers.Set("Return-Client-Request-Id", "false") + } + headers.Set("x-client-sku", "MSAL.Go") + headers.Set("x-client-os", runtime.GOOS) + headers.Set("x-client-cpu", runtime.GOARCH) + headers.Set("x-client-ver", version.Version) + return headers +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go new file mode 100644 index 000000000..4d3dbfcf0 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package comm + +import ( + "compress/gzip" + "io" +) + +func gzipDecompress(r io.Reader) io.Reader { + gzipReader, _ := gzip.NewReader(r) + + pipeOut, pipeIn := io.Pipe() + go func() { + // decompression bomb would have to come from Azure services. + // If we want to limit, we should do that in comm.do(). + _, err := io.Copy(pipeIn, gzipReader) //nolint + if err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + gzipReader.Close() + return + } + if err := gzipReader.Close(); err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + return + } + pipeIn.Close() + }() + return pipeOut +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go new file mode 100644 index 000000000..b628f61ac --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package grant holds types of grants issued by authorization services. +package grant + +const ( + Password = "password" + JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer" + SAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer" + SAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer" + DeviceCode = "device_code" + AuthCode = "authorization_code" + RefreshToken = "refresh_token" + ClientCredential = "client_credentials" + ClientAssertion = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" +) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go new file mode 100644 index 000000000..1f9c543fa --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package ops provides operations to various backend services using REST clients. + +The REST type provides several clients that can be used to communicate to backends. +Usage is simple: + + rest := ops.New() + + // Creates an authority client and calls the UserRealm() method. + userRealm, err := rest.Authority().UserRealm(ctx, authParameters) + if err != nil { + // Do something + } +*/ +package ops + +import ( + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. +type HTTPClient = comm.HTTPClient + +// REST provides REST clients for communicating with various backends used by MSAL. +type REST struct { + client *comm.Client +} + +// New is the constructor for REST. +func New(httpClient HTTPClient) *REST { + return &REST{client: comm.New(httpClient)} +} + +// Authority returns a client for querying information about various authorities. 
+func (r *REST) Authority() authority.Client { + return authority.Client{Comm: r.client} +} + +// AccessTokens returns a client that can be used to get various access tokens for +// authorization purposes. +func (r *REST) AccessTokens() accesstokens.Client { + return accesstokens.Client{Comm: r.client} +} + +// WSTrust provides access to various metadata in a WSTrust service. This data can +// be used to gain tokens based on SAML data using the client provided by AccessTokens(). +func (r *REST) WSTrust() wstrust.Client { + return wstrust.Client{Comm: r.client} +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go new file mode 100644 index 000000000..a2bb6278a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=endpointType"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[etUnknown-0] + _ = x[etUsernamePassword-1] + _ = x[etWindowsTransport-2] +} + +const _endpointType_name = "etUnknownetUsernamePasswordetWindowsTransport" + +var _endpointType_index = [...]uint8{0, 9, 27, 45} + +func (i endpointType) String() string { + if i < 0 || i >= endpointType(len(_endpointType_index)-1) { + return "endpointType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _endpointType_name[_endpointType_index[i]:_endpointType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go new file mode 100644 index 000000000..649727002 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go @@ -0,0 +1,394 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import "encoding/xml" + +type Definitions struct { + XMLName xml.Name `xml:"definitions"` + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + TargetNamespace string `xml:"targetNamespace,attr"` + WSDL string `xml:"wsdl,attr"` + XSD string `xml:"xsd,attr"` + T string `xml:"t,attr"` + SOAPENC string `xml:"soapenc,attr"` + SOAP string `xml:"soap,attr"` + TNS string `xml:"tns,attr"` + MSC string `xml:"msc,attr"` + WSAM string `xml:"wsam,attr"` + SOAP12 string `xml:"soap12,attr"` + WSA10 string `xml:"wsa10,attr"` + WSA string `xml:"wsa,attr"` + WSAW string `xml:"wsaw,attr"` + WSX string `xml:"wsx,attr"` + WSAP string `xml:"wsap,attr"` + WSU string `xml:"wsu,attr"` + Trust string `xml:"trust,attr"` + WSP string `xml:"wsp,attr"` + Policy []Policy `xml:"Policy"` + Types Types `xml:"types"` + Message []Message `xml:"message"` + PortType []PortType `xml:"portType"` + Binding []Binding `xml:"binding"` + Service Service `xml:"service"` +} + +type Policy struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + ExactlyOne ExactlyOne `xml:"ExactlyOne"` +} + +type ExactlyOne struct { + Text string `xml:",chardata"` + All All `xml:"All"` +} + +type All struct { + Text string `xml:",chardata"` + NegotiateAuthentication NegotiateAuthentication `xml:"NegotiateAuthentication"` + TransportBinding TransportBinding `xml:"TransportBinding"` + UsingAddressing Text `xml:"UsingAddressing"` + EndorsingSupportingTokens EndorsingSupportingTokens `xml:"EndorsingSupportingTokens"` + WSS11 WSS11 `xml:"Wss11"` + Trust10 Trust10 `xml:"Trust10"` + SignedSupportingTokens SignedSupportingTokens `xml:"SignedSupportingTokens"` + Trust13 WSTrust13 `xml:"Trust13"` + SignedEncryptedSupportingTokens SignedEncryptedSupportingTokens `xml:"SignedEncryptedSupportingTokens"` +} + +type NegotiateAuthentication struct { + Text string `xml:",chardata"` + HTTP string `xml:"http,attr"` + XMLName xml.Name +} + +type TransportBinding struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy TransportBindingPolicy `xml:"Policy"` +} + +type TransportBindingPolicy struct { + Text string `xml:",chardata"` + TransportToken TransportToken `xml:"TransportToken"` + AlgorithmSuite AlgorithmSuite `xml:"AlgorithmSuite"` + Layout Layout `xml:"Layout"` + IncludeTimestamp Text `xml:"IncludeTimestamp"` +} + +type TransportToken struct { + Text string `xml:",chardata"` + Policy TransportTokenPolicy `xml:"Policy"` +} + +type TransportTokenPolicy struct { + Text string `xml:",chardata"` + HTTPSToken HTTPSToken `xml:"HttpsToken"` +} + +type HTTPSToken struct { + Text string `xml:",chardata"` + RequireClientCertificate string `xml:"RequireClientCertificate,attr"` +} + +type AlgorithmSuite struct { + Text string `xml:",chardata"` + Policy AlgorithmSuitePolicy `xml:"Policy"` +} + +type AlgorithmSuitePolicy struct { + Text string `xml:",chardata"` + Basic256 Text `xml:"Basic256"` + Basic128 Text `xml:"Basic128"` +} + +type Layout struct { + Text string `xml:",chardata"` + Policy LayoutPolicy `xml:"Policy"` +} + +type LayoutPolicy struct { + Text string `xml:",chardata"` + Strict Text `xml:"Strict"` +} + +type EndorsingSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy EndorsingSupportingTokensPolicy `xml:"Policy"` +} + +type EndorsingSupportingTokensPolicy struct { + Text string `xml:",chardata"` + X509Token X509Token `xml:"X509Token"` + RSAToken RSAToken `xml:"RsaToken"` + SignedParts SignedParts `xml:"SignedParts"` + KerberosToken KerberosToken 
`xml:"KerberosToken"` + IssuedToken IssuedToken `xml:"IssuedToken"` + KeyValueToken KeyValueToken `xml:"KeyValueToken"` +} + +type X509Token struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy X509TokenPolicy `xml:"Policy"` +} + +type X509TokenPolicy struct { + Text string `xml:",chardata"` + RequireThumbprintReference Text `xml:"RequireThumbprintReference"` + WSSX509V3Token10 Text `xml:"WssX509V3Token10"` +} + +type RSAToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` + MSSP string `xml:"mssp,attr"` +} + +type SignedParts struct { + Text string `xml:",chardata"` + Header SignedPartsHeader `xml:"Header"` +} + +type SignedPartsHeader struct { + Text string `xml:",chardata"` + Name string `xml:"Name,attr"` + Namespace string `xml:"Namespace,attr"` +} + +type KerberosToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy KerberosTokenPolicy `xml:"Policy"` +} + +type KerberosTokenPolicy struct { + Text string `xml:",chardata"` + WSSGSSKerberosV5ApReqToken11 Text `xml:"WssGssKerberosV5ApReqToken11"` +} + +type IssuedToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + RequestSecurityTokenTemplate RequestSecurityTokenTemplate `xml:"RequestSecurityTokenTemplate"` + Policy IssuedTokenPolicy `xml:"Policy"` +} + +type RequestSecurityTokenTemplate struct { + Text string `xml:",chardata"` + KeyType Text `xml:"KeyType"` + EncryptWith Text `xml:"EncryptWith"` + SignatureAlgorithm Text `xml:"SignatureAlgorithm"` + CanonicalizationAlgorithm Text `xml:"CanonicalizationAlgorithm"` + EncryptionAlgorithm Text `xml:"EncryptionAlgorithm"` + KeySize Text `xml:"KeySize"` + KeyWrapAlgorithm Text `xml:"KeyWrapAlgorithm"` +} + +type IssuedTokenPolicy struct { + Text string `xml:",chardata"` + RequireInternalReference Text `xml:"RequireInternalReference"` +} + +type KeyValueToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` +} + +type WSS11 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Wss11Policy `xml:"Policy"` +} + +type Wss11Policy struct { + Text string `xml:",chardata"` + MustSupportRefThumbprint Text `xml:"MustSupportRefThumbprint"` +} + +type Trust10 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Trust10Policy `xml:"Policy"` +} + +type Trust10Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type SupportingTokensPolicy struct { + Text string `xml:",chardata"` + UsernameToken UsernameToken `xml:"UsernameToken"` +} +type UsernameToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy UsernameTokenPolicy `xml:"Policy"` +} + +type UsernameTokenPolicy struct { + Text string `xml:",chardata"` + WSSUsernameToken10 WSSUsernameToken10 `xml:"WssUsernameToken10"` +} + +type WSSUsernameToken10 struct { + Text string `xml:",chardata"` + XMLName xml.Name +} + +type WSTrust13 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy WSTrust13Policy `xml:"Policy"` +} + 
+type WSTrust13Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedEncryptedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type Types struct { + Text string `xml:",chardata"` + Schema Schema `xml:"schema"` +} + +type Schema struct { + Text string `xml:",chardata"` + TargetNamespace string `xml:"targetNamespace,attr"` + Import []Import `xml:"import"` +} + +type Import struct { + Text string `xml:",chardata"` + SchemaLocation string `xml:"schemaLocation,attr"` + Namespace string `xml:"namespace,attr"` +} + +type Message struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Part Part `xml:"part"` +} + +type Part struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Element string `xml:"element,attr"` +} + +type PortType struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation Operation `xml:"operation"` +} + +type Operation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Input OperationIO `xml:"input"` + Output OperationIO `xml:"output"` +} + +type OperationIO struct { + Text string `xml:",chardata"` + Action string `xml:"Action,attr"` + Message string `xml:"message,attr"` + Body OperationIOBody `xml:"body"` +} + +type OperationIOBody struct { + Text string `xml:",chardata"` + Use string `xml:"use,attr"` +} + +type Binding struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + PolicyReference PolicyReference `xml:"PolicyReference"` + Binding DefinitionsBinding `xml:"binding"` + Operation BindingOperation `xml:"operation"` +} + +type PolicyReference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` +} + +type DefinitionsBinding struct { + Text string `xml:",chardata"` + Transport string `xml:"transport,attr"` +} + +type BindingOperation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation BindingOperationOperation `xml:"operation"` + Input BindingOperationIO `xml:"input"` + Output BindingOperationIO `xml:"output"` +} + +type BindingOperationOperation struct { + Text string `xml:",chardata"` + SoapAction string `xml:"soapAction,attr"` + Style string `xml:"style,attr"` +} + +type BindingOperationIO struct { + Text string `xml:",chardata"` + Body OperationIOBody `xml:"body"` +} + +type Service struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Port []Port `xml:"port"` +} + +type Port struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Binding string `xml:"binding,attr"` + Address Address `xml:"address"` + EndpointReference PortEndpointReference `xml:"EndpointReference"` +} + +type Address struct { + Text string `xml:",chardata"` + Location string `xml:"location,attr"` +} + +type PortEndpointReference struct { + Text string `xml:",chardata"` + Address Text `xml:"Address"` + Identity Identity `xml:"Identity"` +} + +type Identity struct { + Text string `xml:",chardata"` + XMLNS string `xml:"xmlns,attr"` + SPN Text `xml:"Spn"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go new file mode 100644 index 000000000..7d0725565 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go @@ -0,0 +1,230 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import "encoding/xml" + +// TODO(msal): Someone (and it ain't gonna be me) needs to document these attributes or +// at the least put a link to RFC. + +type SAMLDefinitions struct { + XMLName xml.Name `xml:"Envelope"` + Text string `xml:",chardata"` + S string `xml:"s,attr"` + A string `xml:"a,attr"` + U string `xml:"u,attr"` + Header Header `xml:"Header"` + Body Body `xml:"Body"` +} + +type Header struct { + Text string `xml:",chardata"` + Action Action `xml:"Action"` + Security Security `xml:"Security"` +} + +type Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` +} + +type Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` + O string `xml:"o,attr"` + Timestamp Timestamp `xml:"Timestamp"` +} + +type Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + Created Text `xml:"Created"` + Expires Text `xml:"Expires"` +} + +type Text struct { + Text string `xml:",chardata"` +} + +type Body struct { + Text string `xml:",chardata"` + RequestSecurityTokenResponseCollection RequestSecurityTokenResponseCollection `xml:"RequestSecurityTokenResponseCollection"` +} + +type RequestSecurityTokenResponseCollection struct { + Text string `xml:",chardata"` + Trust string `xml:"trust,attr"` + RequestSecurityTokenResponse []RequestSecurityTokenResponse `xml:"RequestSecurityTokenResponse"` +} + +type RequestSecurityTokenResponse struct { + Text string `xml:",chardata"` + Lifetime Lifetime `xml:"Lifetime"` + AppliesTo AppliesTo `xml:"AppliesTo"` + RequestedSecurityToken RequestedSecurityToken `xml:"RequestedSecurityToken"` + RequestedAttachedReference RequestedAttachedReference `xml:"RequestedAttachedReference"` + RequestedUnattachedReference RequestedUnattachedReference `xml:"RequestedUnattachedReference"` + TokenType Text `xml:"TokenType"` + RequestType Text `xml:"RequestType"` + KeyType Text `xml:"KeyType"` +} + +type Lifetime struct { + Text string `xml:",chardata"` + Created WSUTimestamp `xml:"Created"` + Expires WSUTimestamp `xml:"Expires"` +} + +type WSUTimestamp struct { + Text string `xml:",chardata"` + Wsu string `xml:"wsu,attr"` +} + +type AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"wsp,attr"` + EndpointReference EndpointReference `xml:"EndpointReference"` +} + +type EndpointReference struct { + Text string `xml:",chardata"` + Wsa string `xml:"wsa,attr"` + Address Text `xml:"Address"` +} + +type RequestedSecurityToken struct { + Text string `xml:",chardata"` + AssertionRawXML string `xml:",innerxml"` + Assertion Assertion `xml:"Assertion"` +} + +type Assertion struct { + XMLName xml.Name // Normally its `xml:"Assertion"`, but I think they want to capture the xmlns + Text string `xml:",chardata"` + MajorVersion string `xml:"MajorVersion,attr"` + MinorVersion string `xml:"MinorVersion,attr"` + AssertionID string `xml:"AssertionID,attr"` + Issuer string `xml:"Issuer,attr"` + IssueInstant string `xml:"IssueInstant,attr"` + Saml string `xml:"saml,attr"` + Conditions Conditions `xml:"Conditions"` + 
AttributeStatement AttributeStatement `xml:"AttributeStatement"` + AuthenticationStatement AuthenticationStatement `xml:"AuthenticationStatement"` + Signature Signature `xml:"Signature"` +} + +type Conditions struct { + Text string `xml:",chardata"` + NotBefore string `xml:"NotBefore,attr"` + NotOnOrAfter string `xml:"NotOnOrAfter,attr"` + AudienceRestrictionCondition AudienceRestrictionCondition `xml:"AudienceRestrictionCondition"` +} + +type AudienceRestrictionCondition struct { + Text string `xml:",chardata"` + Audience Text `xml:"Audience"` +} + +type AttributeStatement struct { + Text string `xml:",chardata"` + Subject Subject `xml:"Subject"` + Attribute []Attribute `xml:"Attribute"` +} + +type Subject struct { + Text string `xml:",chardata"` + NameIdentifier NameIdentifier `xml:"NameIdentifier"` + SubjectConfirmation SubjectConfirmation `xml:"SubjectConfirmation"` +} + +type NameIdentifier struct { + Text string `xml:",chardata"` + Format string `xml:"Format,attr"` +} + +type SubjectConfirmation struct { + Text string `xml:",chardata"` + ConfirmationMethod Text `xml:"ConfirmationMethod"` +} + +type Attribute struct { + Text string `xml:",chardata"` + AttributeName string `xml:"AttributeName,attr"` + AttributeNamespace string `xml:"AttributeNamespace,attr"` + AttributeValue Text `xml:"AttributeValue"` +} + +type AuthenticationStatement struct { + Text string `xml:",chardata"` + AuthenticationMethod string `xml:"AuthenticationMethod,attr"` + AuthenticationInstant string `xml:"AuthenticationInstant,attr"` + Subject Subject `xml:"Subject"` +} + +type Signature struct { + Text string `xml:",chardata"` + Ds string `xml:"ds,attr"` + SignedInfo SignedInfo `xml:"SignedInfo"` + SignatureValue Text `xml:"SignatureValue"` + KeyInfo KeyInfo `xml:"KeyInfo"` +} + +type SignedInfo struct { + Text string `xml:",chardata"` + CanonicalizationMethod Method `xml:"CanonicalizationMethod"` + SignatureMethod Method `xml:"SignatureMethod"` + Reference Reference `xml:"Reference"` +} + +type Method struct { + Text string `xml:",chardata"` + Algorithm string `xml:"Algorithm,attr"` +} + +type Reference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` + Transforms Transforms `xml:"Transforms"` + DigestMethod Method `xml:"DigestMethod"` + DigestValue Text `xml:"DigestValue"` +} + +type Transforms struct { + Text string `xml:",chardata"` + Transform []Method `xml:"Transform"` +} + +type KeyInfo struct { + Text string `xml:",chardata"` + Xmlns string `xml:"xmlns,attr"` + X509Data X509Data `xml:"X509Data"` +} + +type X509Data struct { + Text string `xml:",chardata"` + X509Certificate Text `xml:"X509Certificate"` +} + +type RequestedAttachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} + +type SecurityTokenReference struct { + Text string `xml:",chardata"` + TokenType string `xml:"TokenType,attr"` + O string `xml:"o,attr"` + K string `xml:"k,attr"` + KeyIdentifier KeyIdentifier `xml:"KeyIdentifier"` +} + +type KeyIdentifier struct { + Text string `xml:",chardata"` + ValueType string `xml:"ValueType,attr"` +} + +type RequestedUnattachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go new file mode 100644 index 000000000..6fe5efa8a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Version"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TrustUnknown-0] + _ = x[Trust2005-1] + _ = x[Trust13-2] +} + +const _Version_name = "TrustUnknownTrust2005Trust13" + +var _Version_index = [...]uint8{0, 12, 21, 28} + +func (i Version) String() string { + if i < 0 || i >= Version(len(_Version_index)-1) { + return "Version(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Version_name[_Version_index[i]:_Version_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go new file mode 100644 index 000000000..8fad5efb5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + uuid "github.com/google/uuid" +) + +//go:generate stringer -type=Version + +type Version int + +const ( + TrustUnknown Version = iota + Trust2005 + Trust13 +) + +// Endpoint represents a WSTrust endpoint. +type Endpoint struct { + // Version is the version of the endpoint. + Version Version + // URL is the URL of the endpoint. 
+ URL string +} + +type wsTrustTokenRequestEnvelope struct { + XMLName xml.Name `xml:"s:Envelope"` + Text string `xml:",chardata"` + S string `xml:"xmlns:s,attr"` + Wsa string `xml:"xmlns:wsa,attr"` + Wsu string `xml:"xmlns:wsu,attr"` + Header struct { + Text string `xml:",chardata"` + Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:Action"` + MessageID struct { + Text string `xml:",chardata"` + } `xml:"wsa:messageID"` + ReplyTo struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:ReplyTo"` + To struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:To"` + Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + Wsse string `xml:"xmlns:wsse,attr"` + Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Created struct { + Text string `xml:",chardata"` + } `xml:"wsu:Created"` + Expires struct { + Text string `xml:",chardata"` + } `xml:"wsu:Expires"` + } `xml:"wsu:Timestamp"` + UsernameToken struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Username struct { + Text string `xml:",chardata"` + } `xml:"wsse:Username"` + Password struct { + Text string `xml:",chardata"` + } `xml:"wsse:Password"` + } `xml:"wsse:UsernameToken"` + } `xml:"wsse:Security"` + } `xml:"s:Header"` + Body struct { + Text string `xml:",chardata"` + RequestSecurityToken struct { + Text string `xml:",chardata"` + Wst string `xml:"xmlns:wst,attr"` + AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"xmlns:wsp,attr"` + EndpointReference struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:EndpointReference"` + } `xml:"wsp:AppliesTo"` + KeyType struct { + Text string `xml:",chardata"` + } `xml:"wst:KeyType"` + RequestType struct { + Text string `xml:",chardata"` + } `xml:"wst:RequestType"` + } `xml:"wst:RequestSecurityToken"` + } `xml:"s:Body"` +} + +func buildTimeString(t time.Time) string { + // Golang time formats are weird: https://stackoverflow.com/questions/20234104/how-to-format-current-time-using-a-yyyymmddhhmmss-format + return t.Format("2006-01-02T15:04:05.000Z") +} + +func (wte *Endpoint) buildTokenRequestMessage(authType authority.AuthorizeType, cloudAudienceURN string, username string, password string) (string, error) { + var soapAction string + var trustNamespace string + var keyType string + var requestType string + + createdTime := time.Now().UTC() + expiresTime := createdTime.Add(10 * time.Minute) + + switch wte.Version { + case Trust2005: + soapAction = trust2005Spec + trustNamespace = "http://schemas.xmlsoap.org/ws/2005/02/trust" + keyType = "http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey" + requestType = "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue" + case Trust13: + soapAction = trust13Spec + trustNamespace = "http://docs.oasis-open.org/ws-sx/ws-trust/200512" + keyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer" + requestType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue" + default: + return "", fmt.Errorf("buildTokenRequestMessage had Version == %q, which is not recognized", wte.Version) + } + + var envelope wsTrustTokenRequestEnvelope + + messageUUID := uuid.New() + + envelope.S = "http://www.w3.org/2003/05/soap-envelope" + envelope.Wsa = 
"http://www.w3.org/2005/08/addressing" + envelope.Wsu = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" + + envelope.Header.Action.MustUnderstand = "1" + envelope.Header.Action.Text = soapAction + envelope.Header.MessageID.Text = "urn:uuid:" + messageUUID.String() + envelope.Header.ReplyTo.Address.Text = "http://www.w3.org/2005/08/addressing/anonymous" + envelope.Header.To.MustUnderstand = "1" + envelope.Header.To.Text = wte.URL + + switch authType { + case authority.ATUnknown: + return "", fmt.Errorf("buildTokenRequestMessage had no authority type(%v)", authType) + case authority.ATUsernamePassword: + endpointUUID := uuid.New() + + var trustID string + if wte.Version == Trust2005 { + trustID = "UnPwSecTok2005-" + endpointUUID.String() + } else { + trustID = "UnPwSecTok13-" + endpointUUID.String() + } + + envelope.Header.Security.MustUnderstand = "1" + envelope.Header.Security.Wsse = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" + envelope.Header.Security.Timestamp.ID = "MSATimeStamp" + envelope.Header.Security.Timestamp.Created.Text = buildTimeString(createdTime) + envelope.Header.Security.Timestamp.Expires.Text = buildTimeString(expiresTime) + envelope.Header.Security.UsernameToken.ID = trustID + envelope.Header.Security.UsernameToken.Username.Text = username + envelope.Header.Security.UsernameToken.Password.Text = password + default: + // This is just to note that we don't do anything for other cases. + // We aren't missing anything I know of. + } + + envelope.Body.RequestSecurityToken.Wst = trustNamespace + envelope.Body.RequestSecurityToken.AppliesTo.Wsp = "http://schemas.xmlsoap.org/ws/2004/09/policy" + envelope.Body.RequestSecurityToken.AppliesTo.EndpointReference.Address.Text = cloudAudienceURN + envelope.Body.RequestSecurityToken.KeyType.Text = keyType + envelope.Body.RequestSecurityToken.RequestType.Text = requestType + + output, err := xml.Marshal(envelope) + if err != nil { + return "", err + } + + return string(output), nil +} + +func (wte *Endpoint) BuildTokenRequestMessageWIA(cloudAudienceURN string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATWindowsIntegrated, cloudAudienceURN, "", "") +} + +func (wte *Endpoint) BuildTokenRequestMessageUsernamePassword(cloudAudienceURN string, username string, password string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATUsernamePassword, cloudAudienceURN, username, password) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go new file mode 100644 index 000000000..e3d19886e --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import ( + "errors" + "fmt" + "strings" +) + +//go:generate stringer -type=endpointType + +type endpointType int + +const ( + etUnknown endpointType = iota + etUsernamePassword + etWindowsTransport +) + +type wsEndpointData struct { + Version Version + EndpointType endpointType +} + +const trust13Spec string = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" +const trust2005Spec string = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" + +type MexDocument struct { + UsernamePasswordEndpoint Endpoint + WindowsTransportEndpoint Endpoint + policies map[string]endpointType + bindings map[string]wsEndpointData +} + +func updateEndpoint(cached *Endpoint, found Endpoint) { + if cached == nil || cached.Version == TrustUnknown { + *cached = found + return + } + if (*cached).Version == Trust2005 && found.Version == Trust13 { + *cached = found + return + } +} + +// TODO(msal): Someone needs to write tests for everything below. + +// NewFromDef creates a new MexDocument. +func NewFromDef(defs Definitions) (MexDocument, error) { + policies, err := policies(defs) + if err != nil { + return MexDocument{}, err + } + + bindings, err := bindings(defs, policies) + if err != nil { + return MexDocument{}, err + } + + userPass, windows, err := endpoints(defs, bindings) + if err != nil { + return MexDocument{}, err + } + + return MexDocument{ + UsernamePasswordEndpoint: userPass, + WindowsTransportEndpoint: windows, + policies: policies, + bindings: bindings, + }, nil +} + +func policies(defs Definitions) (map[string]endpointType, error) { + policies := make(map[string]endpointType, len(defs.Policy)) + + for _, policy := range defs.Policy { + if policy.ExactlyOne.All.NegotiateAuthentication.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etWindowsTransport + } + } + + if policy.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + if policy.ExactlyOne.All.SignedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + } + + if len(policies) == 0 { + return policies, errors.New("no policies for mex document") + } + + return policies, nil +} + +func bindings(defs Definitions, policies map[string]endpointType) (map[string]wsEndpointData, error) { + bindings := make(map[string]wsEndpointData, len(defs.Binding)) + + for _, binding := range defs.Binding { + policyName := binding.PolicyReference.URI + transport := binding.Binding.Transport + + if transport == "http://schemas.xmlsoap.org/soap/http" { + if policy, ok := policies[policyName]; ok { + bindingName := binding.Name + specVersion := binding.Operation.Operation.SoapAction + + if specVersion == trust13Spec { + bindings[bindingName] = wsEndpointData{Trust13, policy} + } else if specVersion == trust2005Spec { + bindings[bindingName] = wsEndpointData{Trust2005, policy} + } else { + return nil, errors.New("found unknown spec version in mex document") + } + } + } + } + return bindings, nil +} + +func endpoints(defs Definitions, bindings map[string]wsEndpointData) (userPass, windows Endpoint, err error) { + for _, port := range defs.Service.Port { + bindingName := port.Binding + + index := 
strings.Index(bindingName, ":") + if index != -1 { + bindingName = bindingName[index+1:] + } + + if binding, ok := bindings[bindingName]; ok { + url := strings.TrimSpace(port.EndpointReference.Address.Text) + if url == "" { + return Endpoint{}, Endpoint{}, fmt.Errorf("MexDocument cannot have blank URL endpoint") + } + if binding.Version == TrustUnknown { + return Endpoint{}, Endpoint{}, fmt.Errorf("endpoint version unknown") + } + endpoint := Endpoint{Version: binding.Version, URL: url} + + switch binding.EndpointType { + case etUsernamePassword: + updateEndpoint(&userPass, endpoint) + case etWindowsTransport: + updateEndpoint(&windows, endpoint) + default: + return Endpoint{}, Endpoint{}, errors.New("found unknown port type in MEX document") + } + } + } + return userPass, windows, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go new file mode 100644 index 000000000..47cd4c692 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package wstrust provides a client for communicating with a WSTrust (https://en.wikipedia.org/wiki/WS-Trust#:~:text=WS%2DTrust%20is%20a%20WS,in%20a%20secure%20message%20exchange.) +for the purposes of extracting metadata from the service. This data can be used to acquire +tokens using the accesstokens.Client.GetAccessTokenFromSamlGrant() call. +*/ +package wstrust + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" +) + +type xmlCaller interface { + XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error + SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error +} + +type SamlTokenInfo struct { + AssertionType string // Should be either constants SAMLV1Grant or SAMLV2Grant. + Assertion string +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm xmlCaller +} + +// TODO(msal): This allows me to call Mex without having a real Def file on line 45. +// This would fail because policies() would not find a policy. This is easy enough to +// fix in test data, but.... Definitions is defined with built in structs. That needs +// to be pulled apart and until then I have this hack in. +var newFromDef = defs.NewFromDef + +// Mex provides metadata about a wstrust service. +func (c Client) Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) { + resp := defs.Definitions{} + err := c.Comm.XMLCall( + ctx, + federationMetadataURL, + http.Header{}, + nil, + &resp, + ) + if err != nil { + return defs.MexDocument{}, err + } + + return newFromDef(resp) +} + +const ( + SoapActionDefault = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" + + // Note: Commented out because this action is not supported. 
It was in the original code + // but only used in a switch where it errored. Since there was only one value, a default + // worked better. However, buildTokenRequestMessage() had 2005 support. I'm not actually + // sure what's going on here. It like we have half support. For now this is here just + // for documentation purposes in case we are going to add support. + // + // SoapActionWSTrust2005 = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" +) + +// SAMLTokenInfo provides SAML information that is used to generate a SAML token. +func (c Client) SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (SamlTokenInfo, error) { + var wsTrustRequestMessage string + var err error + + switch authParameters.AuthorizationType { + case authority.ATWindowsIntegrated: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageWIA(cloudAudienceURN) + if err != nil { + return SamlTokenInfo{}, err + } + case authority.ATUsernamePassword: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageUsernamePassword( + cloudAudienceURN, authParameters.Username, authParameters.Password) + if err != nil { + return SamlTokenInfo{}, err + } + default: + return SamlTokenInfo{}, fmt.Errorf("unknown auth type %v", authParameters.AuthorizationType) + } + + var soapAction string + switch endpoint.Version { + case defs.Trust13: + soapAction = SoapActionDefault + case defs.Trust2005: + return SamlTokenInfo{}, errors.New("WS Trust 2005 support is not implemented") + default: + return SamlTokenInfo{}, fmt.Errorf("the SOAP endpoint for a wstrust call had an invalid version: %v", endpoint.Version) + } + + resp := defs.SAMLDefinitions{} + err = c.Comm.SOAPCall(ctx, endpoint.URL, soapAction, http.Header{}, nil, wsTrustRequestMessage, &resp) + if err != nil { + return SamlTokenInfo{}, err + } + + return c.samlAssertion(resp) +} + +const ( + samlv1Assertion = "urn:oasis:names:tc:SAML:1.0:assertion" + samlv2Assertion = "urn:oasis:names:tc:SAML:2.0:assertion" +) + +func (c Client) samlAssertion(def defs.SAMLDefinitions) (SamlTokenInfo, error) { + for _, tokenResponse := range def.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse { + token := tokenResponse.RequestedSecurityToken + if token.Assertion.XMLName.Local != "" { + assertion := token.AssertionRawXML + + samlVersion := token.Assertion.Saml + switch samlVersion { + case samlv1Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV1, Assertion: assertion}, nil + case samlv2Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV2, Assertion: assertion}, nil + } + return SamlTokenInfo{}, fmt.Errorf("couldn't parse SAML assertion, version unknown: %q", samlVersion) + } + } + return SamlTokenInfo{}, errors.New("unknown WS-Trust version") +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go new file mode 100644 index 000000000..0ade41179 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// TODO(msal): Write some tests. The original code this came from didn't have tests and I'm too +// tired at this point to do it. It, like many other *Manager code I found was broken because +// they didn't have mutex protection. 
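Taken together, the wstrust client added above works in two steps: Mex fetches and parses the federation metadata, and SAMLTokenInfo then drives the RST exchange against one of the endpoints it found. A minimal package-internal sketch of that wiring, assuming a comm value satisfying xmlCaller and an authority.AuthParams already populated for username/password auth; the metadata URL and cloud audience URN are placeholders supplied by the caller:

func fetchSAMLAssertion(ctx context.Context, comm xmlCaller, metadataURL, cloudAudienceURN string, ap authority.AuthParams) (SamlTokenInfo, error) {
	c := Client{Comm: comm}
	// Step 1: download and parse the MEX (federation metadata) document.
	mex, err := c.Mex(ctx, metadataURL)
	if err != nil {
		return SamlTokenInfo{}, err
	}
	// Step 2: run the RST exchange against the username/password endpoint the
	// document advertised (Trust 1.3 is preferred over Trust 2005 when both exist).
	return c.SAMLTokenInfo(ctx, ap, cloudAudienceURN, mex.UsernamePasswordEndpoint)
}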
+ +package oauth + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" +) + +// ADFS is an active directory federation service authority type. +const ADFS = "ADFS" + +type cacheEntry struct { + Endpoints authority.Endpoints + ValidForDomainsInList map[string]bool +} + +func createcacheEntry(endpoints authority.Endpoints) cacheEntry { + return cacheEntry{endpoints, map[string]bool{}} +} + +// AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition. +type authorityEndpoint struct { + rest *ops.REST + + mu sync.Mutex + cache map[string]cacheEntry +} + +// newAuthorityEndpoint is the constructor for AuthorityEndpoint. +func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint { + m := &authorityEndpoint{rest: rest, cache: map[string]cacheEntry{}} + return m +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance +func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + + if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found { + return endpoints, nil + } + + endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName) + if err != nil { + return authority.Endpoints{}, err + } + + resp, err := m.rest.Authority().GetTenantDiscoveryResponse(ctx, endpoint) + if err != nil { + return authority.Endpoints{}, err + } + if err := resp.Validate(); err != nil { + return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err) + } + + tenant := authorityInfo.Tenant + + endpoints := authority.NewEndpoints( + strings.Replace(resp.AuthorizationEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.TokenEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.Issuer, "{tenant}", tenant, -1), + authorityInfo.Host) + + m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints) + + return endpoints, nil +} + +// cachedEndpoints returns a the cached endpoints if they exists. If not, we return false. +func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) { + m.mu.Lock() + defer m.mu.Unlock() + + if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + if authorityInfo.AuthorityType == ADFS { + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok { + return cacheEntry.Endpoints, true + } + } + } + return cacheEntry.Endpoints, true + } + return authority.Endpoints{}, false +} + +func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, userPrincipalName string, endpoints authority.Endpoints) { + m.mu.Lock() + defer m.mu.Unlock() + + updatedCacheEntry := createcacheEntry(endpoints) + + if authorityInfo.AuthorityType == ADFS { + // Since we're here, we've made a call to the backend. We want to ensure we're caching + // the latest values from the server. 
+ if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + for k := range cacheEntry.ValidForDomainsInList { + updatedCacheEntry.ValidForDomainsInList[k] = true + } + } + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + updatedCacheEntry.ValidForDomainsInList[domain] = true + } + } + + m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry +} + +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { + if authorityInfo.Tenant == "adfs" { + return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + } else if authorityInfo.Region != "" { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + + } + + return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil +} + +func adfsDomainFromUpn(userPrincipalName string) (string, error) { + parts := strings.Split(userPrincipalName, "@") + if len(parts) < 2 { + return "", errors.New("no @ present in user principal name") + } + return parts[1], nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go new file mode 100644 index 000000000..4561d72db --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package options + +import ( + "errors" + "fmt" +) + +// CallOption implements an optional argument to a method call. See +// https://blog.devgenius.io/go-call-option-that-can-be-used-with-multiple-methods-6c81734f3dbe +// for an explanation of the usage pattern. +type CallOption interface { + Do(any) error + callOption() +} + +// ApplyOptions applies all the callOptions to options. options must be a pointer to a struct and +// callOptions must be a list of objects that implement CallOption. +func ApplyOptions[O, C any](options O, callOptions []C) error { + for _, o := range callOptions { + if t, ok := any(o).(CallOption); !ok { + return fmt.Errorf("unexpected option type %T", o) + } else if err := t.Do(options); err != nil { + return err + } + } + return nil +} + +// NewCallOption returns a new CallOption whose Do() method calls function "f". +func NewCallOption(f func(any) error) CallOption { + if f == nil { + // This isn't a practical concern because only an MSAL maintainer can get + // us here, by implementing a do-nothing option. But if someone does that, + // the below ensures the method invoked with the option returns an error. 
+ return callOption(func(any) error { + return errors.New("invalid option: missing implementation") + }) + } + return callOption(f) +} + +// callOption is an adapter for a function to a CallOption +type callOption func(any) error + +func (c callOption) Do(a any) error { + return c(a) +} + +func (callOption) callOption() {} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go new file mode 100644 index 000000000..f7e12a71b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package shared + +import ( + "net/http" + "reflect" + "strings" +) + +const ( + // CacheKeySeparator is used in creating the keys of the cache. + CacheKeySeparator = "-" +) + +type Account struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + LocalAccountID string `json:"local_account_id,omitempty"` + AuthorityType string `json:"authority_type,omitempty"` + PreferredUsername string `json:"username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + AlternativeID string `json:"alternative_account_id,omitempty"` + RawClientInfo string `json:"client_info,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccount creates an account. +func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, username string) Account { + return Account{ + HomeAccountID: homeAccountID, + Environment: env, + Realm: realm, + LocalAccountID: localAccountID, + AuthorityType: authorityType, + PreferredUsername: username, + } +} + +// Key creates the key for storing accounts in the cache. +func (acc Account) Key() string { + return strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator) +} + +// IsZero checks the zero value of account. +func (acc Account) IsZero() bool { + v := reflect.ValueOf(acc) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// DefaultClient is our default shared HTTP client. +var DefaultClient = &http.Client{} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go new file mode 100644 index 000000000..b76c0c569 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -0,0 +1,8 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package version keeps the version number of the client package. +package version + +// Version is the version of this client package that is communicated to the server. 
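The Key method above is what the cache uses to index an Account: the home account ID, environment, and realm joined with CacheKeySeparator. A quick sketch of the resulting shape; every value here is a made-up placeholder:

acc := shared.NewAccount("uid.utid", "login.microsoftonline.com", "contoso.onmicrosoft.com", "uid", "MSSTS", "user@contoso.com")
key := acc.Key() // "uid.utid-login.microsoftonline.com-contoso.onmicrosoft.com"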
+const Version = "1.0.0" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go new file mode 100644 index 000000000..cce05277e --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -0,0 +1,683 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package public provides a client for authentication of "public" applications. A "public" +application is defined as an app that runs on client devices (android, ios, windows, linux, ...). +These devices are "untrusted" and access resources via web APIs that must authenticate. +*/ +package public + +/* +Design note: + +public.Client uses client.Base as an embedded type. client.Base statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams is a copy that is free to be manipulated here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// base usage details should be includee in the package documentation. + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/url" + "strconv" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" + "github.com/google/uuid" + "github.com/pkg/browser" +) + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type Account = shared.Account + +// clientOptions configures the Client's behavior. +type clientOptions struct { + accessor cache.ExportReplace + authority string + capabilities []string + disableInstanceDiscovery bool + httpClient ops.HTTPClient +} + +func (p *clientOptions) validate() error { + u, err := url.Parse(p.authority) + if err != nil { + return fmt.Errorf("Authority options cannot be URL parsed: %w", err) + } + if u.Scheme != "https" { + return fmt.Errorf("Authority(%s) did not start with https://", u.String()) + } + return nil +} + +// Option is an optional argument to the New constructor. +type Option func(o *clientOptions) + +// WithAuthority allows for a custom authority to be set. This must be a valid https url. +func WithAuthority(authority string) Option { + return func(o *clientOptions) { + o.authority = authority + } +} + +// WithCache provides an accessor that will read and write authentication data to an externally managed cache. 
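The validate method above is the only check applied to a configured authority: it must parse as a URL and use the https scheme. A package-internal sketch of the rejection path, with a hypothetical authority value:

opts := clientOptions{authority: "http://login.contoso.example"} // not https
if err := opts.validate(); err != nil {
	// err: Authority(http://login.contoso.example) did not start with https://
}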
+func WithCache(accessor cache.ExportReplace) Option { + return func(o *clientOptions) { + o.accessor = accessor + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *clientOptions) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *clientOptions) { + o.httpClient = httpClient + } +} + +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *clientOptions) { + o.disableInstanceDiscovery = !enabled + } +} + +// Client is a representation of authentication client for public applications as defined in the +// package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications. +type Client struct { + base base.Client +} + +// New is the constructor for Client. +func New(clientID string, options ...Option) (Client, error) { + opts := clientOptions{ + authority: base.AuthorityPublicCloud, + httpClient: shared.DefaultClient, + } + + for _, o := range options { + o(&opts) + } + if err := opts.validate(); err != nil { + return Client{}, err + } + + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), base.WithInstanceDiscovery(!opts.disableInstanceDiscovery)) + if err != nil { + return Client{}, err + } + return Client{base}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (pca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. 
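A minimal sketch of putting the constructor and AuthCodeURL together; the client ID, redirect URI, and scope below are placeholders, and the usual context/log/fmt imports are assumed:

ctx := context.Background()
client, err := public.New(
	"00000000-0000-0000-0000-000000000000", // hypothetical application (client) ID
	public.WithAuthority("https://login.microsoftonline.com/organizations"),
)
if err != nil {
	log.Fatal(err)
}
authURL, err := client.AuthCodeURL(ctx, "00000000-0000-0000-0000-000000000000", "http://localhost:8080", []string{"User.Read"})
if err != nil {
	log.Fatal(err)
}
fmt.Println(authURL) // hand this URL to a browser to obtain an authorization code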
+func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByDeviceCodeOptions: + t.claims = claims + case *acquireTokenByUsernamePasswordOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + case *interactiveAuthOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByDeviceCodeOptions: + t.tenantID = tenantID + case *acquireTokenByUsernamePasswordOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + case *interactiveAuthOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
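WithClaims and WithTenantID above share one trick: the returned value is an anonymous struct that embeds every per-method marker interface plus options.CallOption, so a single option value type-checks against any acquire method while NewCallOption supplies the behavior. A stripped-down sketch of the same pattern using hypothetical names (fooOptions, barOptions, WithRegion):

type fooOptions struct{ region string }
type barOptions struct{ region string }

type FooOption interface{ fooOption() }
type BarOption interface{ barOption() }

// WithRegion can be passed to any method accepting ...FooOption or ...BarOption.
func WithRegion(region string) interface {
	FooOption
	BarOption
	options.CallOption
} {
	return struct {
		FooOption
		BarOption
		options.CallOption
	}{
		CallOption: options.NewCallOption(func(a any) error {
			switch t := a.(type) {
			case *fooOptions:
				t.region = region
			case *barOptions:
				t.region = region
			default:
				return fmt.Errorf("unexpected options type %T", a)
			}
			return nil
		}),
	}
}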
+// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := acquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + silentParameters := base.AcquireTokenSilentParameters{ + Scopes: scopes, + Account: o.account, + Claims: o.claims, + RequestType: accesstokens.ATPublic, + IsAppCache: false, + TenantID: o.tenantID, + } + + return pca.base.AcquireTokenSilent(ctx, silentParameters) +} + +// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword +type acquireTokenByUsernamePasswordOptions struct { + claims, tenantID string +} + +// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword +type AcquireByUsernamePasswordOption interface { + acquireByUsernamePasswordOption() +} + +// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication. +// NOTE: this flow is NOT recommended. +// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) { + o := acquireTokenByUsernamePasswordOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATUsernamePassword + authParams.Claims = o.claims + authParams.Username = username + authParams.Password = password + + token, err := pca.base.Token.UsernamePassword(ctx, authParams) + if err != nil { + return AuthResult{}, err + } + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type DeviceCodeResult = accesstokens.DeviceCodeResult + +// DeviceCode provides the results of the device code flows first stage (containing the code) +// that must be entered on the second device and provides a method to retrieve the AuthenticationResult +// once that code has been entered and verified. +type DeviceCode struct { + // Result holds the information about the device code (such as the code). + Result DeviceCodeResult + + authParams authority.AuthParams + client Client + dc oauth.DeviceCode +} + +// AuthenticationResult retreives the AuthenticationResult once the user enters the code +// on the second device. Until then it blocks until the .AcquireTokenByDeviceCode() context +// is cancelled or the token expires. +func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error) { + token, err := d.dc.Token(ctx) + if err != nil { + return AuthResult{}, err + } + return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true) +} + +// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode +type acquireTokenByDeviceCodeOptions struct { + claims, tenantID string +} + +// AcquireByDeviceCodeOption is implemented by options for AcquireTokenByDeviceCode +type AcquireByDeviceCodeOption interface { + acquireByDeviceCodeOptions() +} + +// AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token. +// Users need to create an AcquireTokenDeviceCodeParameters instance and pass it in. 
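A common calling pattern for the methods above is cache-first: try AcquireTokenSilent and only fall back to a credential flow when it fails. A sketch assuming client, ctx, scopes, and account already exist; the username and password are placeholders (and, as noted above, the username/password flow is not recommended):

result, err := client.AcquireTokenSilent(ctx, scopes, public.WithSilentAccount(account))
if err != nil {
	// Cache miss or unusable refresh token: fall back to username/password.
	result, err = client.AcquireTokenByUsernamePassword(ctx, scopes, "user@contoso.example", password)
	if err != nil {
		return err
	}
}
_ = result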
+// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string, opts ...AcquireByDeviceCodeOption) (DeviceCode, error) { + o := acquireTokenByDeviceCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return DeviceCode{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return DeviceCode{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATDeviceCode + authParams.Claims = o.claims + + dc, err := pca.base.Token.DeviceCode(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dc.Result, authParams: authParams, client: pca, dc: dc}, nil +} + +// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type acquireTokenByAuthCodeOptions struct { + challenge, claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() +} + +// WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call. +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := acquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: o.challenge, + Claims: o.claims, + AppType: accesstokens.ATPublic, + RedirectURI: redirectURI, + TenantID: o.tenantID, + } + + return pca.base.AcquireTokenByAuthCode(ctx, params) +} + +// Accounts gets all the accounts in the token cache. +// If there are no accounts in the cache the returned slice is empty. +func (pca Client) Accounts(ctx context.Context) ([]Account, error) { + return pca.base.AllAccounts(ctx) +} + +// RemoveAccount signs the account out and forgets account from token cache. +func (pca Client) RemoveAccount(ctx context.Context, account Account) error { + return pca.base.RemoveAccount(ctx, account) +} + +// interactiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow. +type interactiveAuthOptions struct { + claims, domainHint, loginHint, redirectURI, tenantID string +} + +// AcquireInteractiveOption is implemented by options for AcquireTokenInteractive +type AcquireInteractiveOption interface { + acquireInteractiveOption() +} + +// WithLoginHint pre-populates the login prompt with a username. 
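The device-code flow defined above is two-phase: AcquireTokenByDeviceCode returns immediately with the code the user must enter on another device, and AuthenticationResult then blocks until that code is redeemed or ctx is cancelled. A sketch, assuming client, ctx, and scopes as before:

dc, err := client.AcquireTokenByDeviceCode(ctx, scopes)
if err != nil {
	return err
}
// Surface dc.Result (the user code and related instructions) to the user, then block for the token.
result, err := dc.AuthenticationResult(ctx)
if err != nil {
	return err
}
_ = result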
+func WithLoginHint(username string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + case *interactiveAuthOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + case *interactiveAuthOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithRedirectURI sets a port for the local server used in interactive authentication, for +// example http://localhost:port. All URI components other than the port are ignored. +func WithRedirectURI(redirectURI string) interface { + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *interactiveAuthOptions: + t.redirectURI = redirectURI + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account. +// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication +// +// Options: [WithDomainHint], [WithLoginHint], [WithRedirectURI], [WithTenantID] +func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, opts ...AcquireInteractiveOption) (AuthResult, error) { + o := interactiveAuthOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + // the code verifier is a random 32-byte sequence that's been base-64 encoded without padding. 
+ // it's used to prevent MitM attacks during auth code flow, see https://tools.ietf.org/html/rfc7636 + cv, challenge, err := codeVerifier() + if err != nil { + return AuthResult{}, err + } + var redirectURL *url.URL + if o.redirectURI != "" { + redirectURL, err = url.Parse(o.redirectURI) + if err != nil { + return AuthResult{}, err + } + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATInteractive + authParams.Claims = o.claims + authParams.CodeChallenge = challenge + authParams.CodeChallengeMethod = "S256" + authParams.LoginHint = o.loginHint + authParams.DomainHint = o.domainHint + authParams.State = uuid.New().String() + authParams.Prompt = "select_account" + res, err := pca.browserLogin(ctx, redirectURL, authParams) + if err != nil { + return AuthResult{}, err + } + authParams.Redirecturi = res.redirectURI + + req, err := accesstokens.NewCodeChallengeRequest(authParams, accesstokens.ATPublic, nil, res.authCode, cv) + if err != nil { + return AuthResult{}, err + } + + token, err := pca.base.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type interactiveAuthResult struct { + authCode string + redirectURI string +} + +// provides a test hook to simulate opening a browser +var browserOpenURL = func(authURL string) error { + return browser.OpenURL(authURL) +} + +// parses the port number from the provided URL. +// returns 0 if nil or no port is specified. +func parsePort(u *url.URL) (int, error) { + if u == nil { + return 0, nil + } + p := u.Port() + if p == "" { + return 0, nil + } + return strconv.Atoi(p) +} + +// browserLogin launches the system browser for interactive login +func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams) (interactiveAuthResult, error) { + // start local redirect server so login can call us back + port, err := parsePort(redirectURI) + if err != nil { + return interactiveAuthResult{}, err + } + srv, err := local.New(params.State, port) + if err != nil { + return interactiveAuthResult{}, err + } + defer srv.Shutdown() + params.Scopes = accesstokens.AppendDefaultScopes(params) + authURL, err := pca.base.AuthCodeURL(ctx, params.ClientID, srv.Addr, params.Scopes, params) + if err != nil { + return interactiveAuthResult{}, err + } + // open browser window so user can select credentials + if err := browserOpenURL(authURL); err != nil { + return interactiveAuthResult{}, err + } + // now wait until the logic calls us back + res := srv.Result(ctx) + if res.Err != nil { + return interactiveAuthResult{}, res.Err + } + return interactiveAuthResult{ + authCode: res.Code, + redirectURI: srv.Addr, + }, nil +} + +// creates a code verifier string along with its SHA256 hash which +// is used as the challenge when requesting an auth code. +// used in interactive auth flow for PKCE. 
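For reference, the verifier/challenge pair produced by the codeVerifier helper described above is related by the S256 transform from RFC 7636; the token endpoint validates it by recomputing the same hash. A sketch of that check (not part of this package; verifier and challenge are assumed to be strings already in hand):

cvh := sha256.Sum256([]byte(verifier))
valid := base64.RawURLEncoding.EncodeToString(cvh[:]) == challenge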
+func codeVerifier() (codeVerifier string, challenge string, err error) { + cvBytes := make([]byte, 32) + if _, err = rand.Read(cvBytes); err != nil { + return + } + codeVerifier = base64.RawURLEncoding.EncodeToString(cvBytes) + // for PKCE, create a hash of the code verifier + cvh := sha256.Sum256([]byte(codeVerifier)) + challenge = base64.RawURLEncoding.EncodeToString(cvh[:]) + return +} diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml index af403bb13..7b503d26a 100644 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -8,12 +8,8 @@ linters: - containedctx # struct contains a context - dupl # duplicate code - errname # erorrs are named correctly - - goconst # strings that should be constants - - godot # comments end in a period - - misspell - nolintlint # "//nolint" directives are properly explained - revive # golint replacement - - stylecheck # golint replacement, less configurable than revive - unconvert # unnecessary conversions - wastedassign @@ -23,10 +19,7 @@ linters: - exhaustive # check exhaustiveness of enum switch statements - gofmt # files are gofmt'ed - gosec # security - - nestif # deeply nested ifs - nilerr # returns nil even with non-nil error - - prealloc # slices that can be pre-allocated - - structcheck # unused struct fields - unparam # unused function params issues: @@ -42,6 +35,18 @@ issues: text: "^line-length-limit: " source: "^//(go:generate|sys) " + #TODO: remove after upgrading to go1.18 + # ignore comment spacing for nolint and sys directives + - linters: + - revive + text: "^comment-spacings: no space between comment delimiter and comment text" + source: "//(cspell:|nolint:|sys |todo)" + + # not on go 1.18 yet, so no any + - linters: + - revive + text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'" + # allow unjustified ignores of error checks in defer statements - linters: - nolintlint @@ -56,6 +61,8 @@ issues: linters-settings: + exhaustive: + default-signifies-exhaustive: true govet: enable-all: true disable: @@ -98,6 +105,8 @@ linters-settings: disabled: true - name: flag-parameter # excessive, and a common idiom we use disabled: true + - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead + disabled: true # general config - name: line-length-limit arguments: @@ -138,7 +147,3 @@ linters-settings: - VPCI - WCOW - WIM - stylecheck: - checks: - - "all" - - "-ST1003" # use revive's var naming diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index 52f1c280f..c88191658 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -23,7 +23,7 @@ import ( const afHVSock = 34 // AF_HYPERV // Well known Service and VM IDs -//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards +// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards // HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 @@ -31,7 +31,7 @@ func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 } // HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. 
-func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff +func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff return guid.GUID{ Data1: 0xffffffff, Data2: 0xffff, @@ -246,7 +246,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o) + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go new file mode 100644 index 000000000..1f6538817 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go @@ -0,0 +1,2 @@ +// This package contains Win32 filesystem functionality. +package fs diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go new file mode 100644 index 000000000..509b3ec64 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -0,0 +1,202 @@ +//go:build windows + +package fs + +import ( + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/stringbuffer" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go + +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW + +const NullHandle windows.Handle = 0 + +// AccessMask defines standard, specific, and generic rights. +// +// Bitmask: +// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 +// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 +// +---------------+---------------+-------------------------------+ +// |G|G|G|G|Resvd|A| StandardRights| SpecificRights | +// |R|W|E|A| |S| | | +// +-+-------------+---------------+-------------------------------+ +// +// GR Generic Read +// GW Generic Write +// GE Generic Exectue +// GA Generic All +// Resvd Reserved +// AS Access Security System +// +// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask +// +// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights +// +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants +type AccessMask = windows.ACCESS_MASK + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // Not actually any. 
+ // + // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device" + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters + FILE_ANY_ACCESS AccessMask = 0 + + // Specific Object Access + // from ntioapi.h + + FILE_READ_DATA AccessMask = (0x0001) // file & pipe + FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory + + FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe + FILE_ADD_FILE AccessMask = (0x0002) // directory + + FILE_APPEND_DATA AccessMask = (0x0004) // file + FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory + FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe + + FILE_READ_EA AccessMask = (0x0008) // file & directory + FILE_READ_PROPERTIES AccessMask = FILE_READ_EA + + FILE_WRITE_EA AccessMask = (0x0010) // file & directory + FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA + + FILE_EXECUTE AccessMask = (0x0020) // file + FILE_TRAVERSE AccessMask = (0x0020) // directory + + FILE_DELETE_CHILD AccessMask = (0x0040) // directory + + FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all + + FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all + + FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF) + FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE) + FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE) + FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE) + + SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF + + // Standard Access + // from ntseapi.h + + DELETE AccessMask = 0x0001_0000 + READ_CONTROL AccessMask = 0x0002_0000 + WRITE_DAC AccessMask = 0x0004_0000 + WRITE_OWNER AccessMask = 0x0008_0000 + SYNCHRONIZE AccessMask = 0x0010_0000 + + STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000 + + STANDARD_RIGHTS_READ AccessMask = READ_CONTROL + STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL + STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL + + STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000 +) + +type FileShareMode uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + FILE_SHARE_NONE FileShareMode = 0x00 + FILE_SHARE_READ FileShareMode = 0x01 + FILE_SHARE_WRITE FileShareMode = 0x02 + FILE_SHARE_DELETE FileShareMode = 0x04 + FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 +) + +type FileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // from winbase.h + + CREATE_NEW FileCreationDisposition = 0x01 + CREATE_ALWAYS FileCreationDisposition = 0x02 + OPEN_EXISTING FileCreationDisposition = 0x03 + OPEN_ALWAYS FileCreationDisposition = 0x04 + TRUNCATE_EXISTING FileCreationDisposition = 0x05 +) + +// CreateFile and co. take flags or attributes together as one parameter. +// Define alias until we can use generics to allow both + +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants +type FileFlagOrAttribute uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
+const ( // from winnt.h + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 + FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 + FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 + FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 + FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 + FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 + FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 + FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 + FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 + FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 + FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 +) + +type FileSQSFlag = FileFlagOrAttribute + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( // from winbase.h + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) + SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) + SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) + SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) + + SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 +) + +// GetFinalPathNameByHandle flags +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters +type GetFinalPathFlag uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 + + FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 + FILE_NAME_OPENED GetFinalPathFlag = 0x8 + + VOLUME_NAME_DOS GetFinalPathFlag = 0x0 + VOLUME_NAME_GUID GetFinalPathFlag = 0x1 + VOLUME_NAME_NT GetFinalPathFlag = 0x2 + VOLUME_NAME_NONE GetFinalPathFlag = 0x4 +) + +// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle +// with the given handle and flags. It transparently takes care of creating a buffer of the +// correct size for the call. +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew +func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { + b := stringbuffer.NewWString() + //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? + for { + n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) + if err != nil { + return "", err + } + // If the buffer wasn't large enough, n will be the total size needed (including null terminator). + // Resize and try again. + if n > b.Cap() { + b.ResizeTo(n) + continue + } + // If the buffer is large enough, n will be the size not including the null terminator. + // Convert to a Go string and return. 
+ return b.String(), nil + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go new file mode 100644 index 000000000..81760ac67 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go @@ -0,0 +1,12 @@ +package fs + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level +type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` + +// Impersonation levels +const ( + SecurityAnonymous SecurityImpersonationLevel = 0 + SecurityIdentification SecurityImpersonationLevel = 1 + SecurityImpersonation SecurityImpersonationLevel = 2 + SecurityDelegation SecurityImpersonationLevel = 3 +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go new file mode 100644 index 000000000..e2f7bb24e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -0,0 +1,64 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package fs + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procCreateFileW = modkernel32.NewProc("CreateFileW") +) + +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go index 39e8c05f8..aeb7b7250 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -100,8 +100,8 @@ func (f *runtimeFunc) Load() error { (*byte)(unsafe.Pointer(&f.addr)), uint32(unsafe.Sizeof(f.addr)), &n, - nil, //overlapped - 0, //completionRoutine + nil, // overlapped + 0, // completionRoutine ) }) return f.err diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go new file mode 100644 index 000000000..7ad505702 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -0,0 +1,132 @@ +package stringbuffer + +import ( + "sync" + "unicode/utf16" +) + +// TODO: worth exporting and using in mkwinsyscall? + +// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate +// large path strings: +// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310. +const MinWStringCap = 310 + +// use *[]uint16 since []uint16 creates an extra allocation where the slice header +// is copied to heap and then referenced via pointer in the interface header that sync.Pool +// stores. +var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly + New: func() interface{} { + b := make([]uint16, MinWStringCap) + return &b + }, +} + +func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) } + +// freeBuffer copies the slice header data, and puts a pointer to that in the pool. +// This avoids taking a pointer to the slice header in WString, which can be set to nil. +func freeBuffer(b []uint16) { pathPool.Put(&b) } + +// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings +// for interacting with Win32 APIs. +// Sizes are specified as uint32 and not int. +// +// It is not thread safe. +type WString struct { + // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future. + + // raw buffer + b []uint16 +} + +// NewWString returns a [WString] allocated from a shared pool with an +// initial capacity of at least [MinWStringCap]. 
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty. +// +// The buffer should be freed via [WString.Free] +func NewWString() *WString { + return &WString{ + b: newBuffer(), + } +} + +func (b *WString) Free() { + if b.empty() { + return + } + freeBuffer(b.b) + b.b = nil +} + +// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the +// previous buffer back into pool. +func (b *WString) ResizeTo(c uint32) uint32 { + // allready sufficient (or n is 0) + if c <= b.Cap() { + return b.Cap() + } + + if c <= MinWStringCap { + c = MinWStringCap + } + // allocate at-least double buffer size, as is done in [bytes.Buffer] and other places + if c <= 2*b.Cap() { + c = 2 * b.Cap() + } + + b2 := make([]uint16, c) + if !b.empty() { + copy(b2, b.b) + freeBuffer(b.b) + } + b.b = b2 + return c +} + +// Buffer returns the underlying []uint16 buffer. +func (b *WString) Buffer() []uint16 { + if b.empty() { + return nil + } + return b.b +} + +// Pointer returns a pointer to the first uint16 in the buffer. +// If the [WString.Free] has already been called, the pointer will be nil. +func (b *WString) Pointer() *uint16 { + if b.empty() { + return nil + } + return &b.b[0] +} + +// String returns the returns the UTF-8 encoding of the UTF-16 string in the buffer. +// +// It assumes that the data is null-terminated. +func (b *WString) String() string { + // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows" + // and would make this code Windows-only, which makes no sense. + // So copy UTF16ToString code into here. + // If other windows-specific code is added, switch to [windows.UTF16ToString] + + s := b.b + for i, v := range s { + if v == 0 { + s = s[:i] + break + } + } + return string(utf16.Decode(s)) +} + +// Cap returns the underlying buffer capacity. 
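WString above exists for the Win32 pattern where an API reports the buffer length it needs: call with the current capacity, and if the returned length exceeds Cap, ResizeTo and retry. GetFinalPathNameByHandle earlier in this patch follows exactly this shape; a generic function-body sketch, where queryFn is a hypothetical syscall wrapper returning the required length in uint16s:

b := stringbuffer.NewWString()
defer b.Free()
for {
	n, err := queryFn(b.Pointer(), b.Cap()) // hypothetical: (p *uint16, cap uint32) (uint32, error)
	if err != nil {
		return "", err
	}
	if n > b.Cap() {
		// Buffer too small: n is the size needed, so grow and call again.
		b.ResizeTo(n)
		continue
	}
	return b.String(), nil
}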
+func (b *WString) Cap() uint32 { + if b.empty() { + return 0 + } + return b.cap() +} + +func (b *WString) cap() uint32 { return uint32(cap(b.b)) } +func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index ca6e38fc0..25cc81103 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -16,11 +16,12 @@ import ( "unsafe" "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/fs" ) //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe //sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc @@ -163,19 +164,21 @@ func (s pipeAddress) String() string { } // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { for { select { case <-ctx.Done(): return syscall.Handle(0), ctx.Err() default: - h, err := createFile(*path, + wh, err := fs.CreateFile(*path, access, - 0, - nil, - syscall.OPEN_EXISTING, - windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS, - 0) + 0, // mode + nil, // security attributes + fs.OPEN_EXISTING, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + 0, // template file handle + ) + h := syscall.Handle(wh) if err == nil { return h, nil } @@ -219,7 +222,7 @@ func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { var err error var h syscall.Handle - h, err = tryDialPipe(ctx, &path, access) + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) if err != nil { return nil, err } @@ -279,6 +282,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy } defer localFree(ntPath.Buffer) oa.ObjectName = &ntPath + oa.Attributes = windows.OBJ_CASE_INSENSITIVE // The security descriptor is only needed for the first pipe. 
if first { diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 83f45a135..469b16f63 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -63,7 +63,6 @@ var ( procBackupWrite = modkernel32.NewProc("BackupWrite") procCancelIoEx = modkernel32.NewProc("CancelIoEx") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateFileW = modkernel32.NewProc("CreateFileW") procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") @@ -305,24 +304,6 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { return } -func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) newport = syscall.Handle(r0) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go new file mode 100644 index 000000000..dd950a286 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go @@ -0,0 +1,50 @@ +package bearer + +import ( + "github.com/aws/aws-sdk-go/aws" + "time" +) + +// Token provides a type wrapping a bearer token and expiration metadata. +type Token struct { + Value string + + CanExpire bool + Expires time.Time +} + +// Expired returns if the token's Expires time is before or equal to the time +// provided. If CanExpire is false, Expired will always return false. +func (t Token) Expired(now time.Time) bool { + if !t.CanExpire { + return false + } + now = now.Round(0) + return now.Equal(t.Expires) || now.After(t.Expires) +} + +// TokenProvider provides interface for retrieving bearer tokens. +type TokenProvider interface { + RetrieveBearerToken(aws.Context) (Token, error) +} + +// TokenProviderFunc provides a helper utility to wrap a function as a type +// that implements the TokenProvider interface. +type TokenProviderFunc func(aws.Context) (Token, error) + +// RetrieveBearerToken calls the wrapped function, returning the Token or +// error. +func (fn TokenProviderFunc) RetrieveBearerToken(ctx aws.Context) (Token, error) { + return fn(ctx) +} + +// StaticTokenProvider provides a utility for wrapping a static bearer token +// value within an implementation of a token provider. 
+type StaticTokenProvider struct { + Token Token +} + +// RetrieveBearerToken returns the static token specified. +func (s StaticTokenProvider) RetrieveBearerToken(aws.Context) (Token, error) { + return s.Token, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go index 6eda2a555..4138e725d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -4,13 +4,13 @@ import ( "crypto/sha1" "encoding/hex" "encoding/json" - "fmt" "io/ioutil" "path/filepath" "strings" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" @@ -55,6 +55,19 @@ type Provider struct { // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. StartURL string + + // The filepath the cached token will be retrieved from. If unset Provider will + // use the startURL to determine the filepath at. + // + // ~/.aws/sso/cache/.json + // + // If custom cached token filepath is used, the Provider's startUrl + // parameter will be ignored. + CachedTokenFilepath string + + // Used by the SSOCredentialProvider if a token configuration + // profile is used in the shared config + TokenProvider bearer.TokenProvider } // NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured @@ -89,13 +102,31 @@ func (p *Provider) Retrieve() (credentials.Value, error) { // RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal // by exchanging the accessToken present in ~/.aws/sso/cache. 
func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - tokenFile, err := loadTokenFile(p.StartURL) - if err != nil { - return credentials.Value{}, err + var accessToken *string + if p.TokenProvider != nil { + token, err := p.TokenProvider.RetrieveBearerToken(ctx) + if err != nil { + return credentials.Value{}, err + } + accessToken = &token.Value + } else { + if p.CachedTokenFilepath == "" { + cachedTokenFilePath, err := getCachedFilePath(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + p.CachedTokenFilepath = cachedTokenFilePath + } + + tokenFile, err := loadTokenFile(p.CachedTokenFilepath) + if err != nil { + return credentials.Value{}, err + } + accessToken = &tokenFile.AccessToken } output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: &tokenFile.AccessToken, + AccessToken: accessToken, AccountId: &p.AccountID, RoleName: &p.RoleName, }) @@ -114,32 +145,13 @@ func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Val }, nil } -func getCacheFileName(url string) (string, error) { +func getCachedFilePath(startUrl string) (string, error) { hash := sha1.New() - _, err := hash.Write([]byte(url)) + _, err := hash.Write([]byte(startUrl)) if err != nil { return "", err } - return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil -} - -type rfc3339 time.Time - -func (r *rfc3339) UnmarshalJSON(bytes []byte) error { - var value string - - if err := json.Unmarshal(bytes, &value); err != nil { - return err - } - - parse, err := time.Parse(time.RFC3339, value) - if err != nil { - return fmt.Errorf("expected RFC3339 timestamp: %v", err) - } - - *r = rfc3339(parse) - - return nil + return filepath.Join(defaultCacheLocation(), strings.ToLower(hex.EncodeToString(hash.Sum(nil)))+".json"), nil } type token struct { @@ -153,13 +165,8 @@ func (t token) Expired() bool { return nowTime().Round(0).After(time.Time(t.ExpiresAt)) } -func loadTokenFile(startURL string) (t token, err error) { - key, err := getCacheFileName(startURL) - if err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) +func loadTokenFile(cachedTokenPath string) (t token, err error) { + fileBytes, err := ioutil.ReadFile(cachedTokenPath) if err != nil { return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go new file mode 100644 index 000000000..f6fa88451 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go @@ -0,0 +1,237 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go/internal/shareddefaults" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +var resolvedOsUserHomeDir = shareddefaults.UserHomeDir + +// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or +// error if unable get derive the path. Key that will be used to compute a SHA1 +// value that is hex encoded. 
+// +// Derives the filepath using the Key as: +// +// ~/.aws/sso/cache/.json +func StandardCachedTokenFilepath(key string) (string, error) { + homeDir := resolvedOsUserHomeDir() + if len(homeDir) == 0 { + return "", fmt.Errorf("unable to get USER's home directory for cached token") + } + hash := sha1.New() + if _, err := hash.Write([]byte(key)); err != nil { + return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %v", err) + } + + cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" + + return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil +} + +type tokenKnownFields struct { + AccessToken string `json:"accessToken,omitempty"` + ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` + + RefreshToken string `json:"refreshToken,omitempty"` + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` +} + +type cachedToken struct { + tokenKnownFields + UnknownFields map[string]interface{} `json:"-"` +} + +// MarshalJSON provides custom marshalling because the standard library Go marshaller ignores unknown/unspecified fields +// when marshalling from a struct: https://pkg.go.dev/encoding/json#Marshal +// This function adds some extra validation to the known fields and captures unknown fields. +func (t cachedToken) MarshalJSON() ([]byte, error) { + fields := map[string]interface{}{} + + setTokenFieldString(fields, "accessToken", t.AccessToken) + setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) + + setTokenFieldString(fields, "refreshToken", t.RefreshToken) + setTokenFieldString(fields, "clientId", t.ClientID) + setTokenFieldString(fields, "clientSecret", t.ClientSecret) + + for k, v := range t.UnknownFields { + if _, ok := fields[k]; ok { + return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) + } + fields[k] = v + } + + return json.Marshal(fields) +} + +func setTokenFieldString(fields map[string]interface{}, key, value string) { + if value == "" { + return + } + fields[key] = value +} +func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { + if value == nil { + return + } + fields[key] = value +} + +// UnmarshalJSON provides custom unmarshalling because the standard library Go unmarshaller ignores unknown/unspecified +// fields when unmarshalling from a struct: https://pkg.go.dev/encoding/json#Unmarshal +// This function adds some extra validation to the known fields and captures unknown fields. 
+func (t *cachedToken) UnmarshalJSON(b []byte) error { + var fields map[string]interface{} + if err := json.Unmarshal(b, &fields); err != nil { + return nil + } + + t.UnknownFields = map[string]interface{}{} + + for k, v := range fields { + var err error + switch k { + case "accessToken": + err = getTokenFieldString(v, &t.AccessToken) + case "expiresAt": + err = getTokenFieldRFC3339(v, &t.ExpiresAt) + case "refreshToken": + err = getTokenFieldString(v, &t.RefreshToken) + case "clientId": + err = getTokenFieldString(v, &t.ClientID) + case "clientSecret": + err = getTokenFieldString(v, &t.ClientSecret) + default: + t.UnknownFields[k] = v + } + + if err != nil { + return fmt.Errorf("field %q, %v", k, err) + } + } + + return nil +} + +func getTokenFieldString(v interface{}, value *string) error { + var ok bool + *value, ok = v.(string) + if !ok { + return fmt.Errorf("expect value to be string, got %T", v) + } + return nil +} + +func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { + var stringValue string + if err := getTokenFieldString(v, &stringValue); err != nil { + return err + } + + timeValue, err := parseRFC3339(stringValue) + if err != nil { + return err + } + + *value = &timeValue + return nil +} + +func loadCachedToken(filename string) (cachedToken, error) { + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to read cached SSO token file, %v", err) + } + + var t cachedToken + if err := json.Unmarshal(fileBytes, &t); err != nil { + return cachedToken{}, fmt.Errorf("failed to parse cached SSO token file, %v", err) + } + + if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { + return cachedToken{}, fmt.Errorf( + "cached SSO token must contain accessToken and expiresAt fields") + } + + return t, nil +} + +func storeCachedToken(filename string, t cachedToken, fileMode os.FileMode) (err error) { + tmpFilename := filename + ".tmp-" + strconv.FormatInt(nowTime().UnixNano(), 10) + if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { + return err + } + + if err := os.Rename(tmpFilename, filename); err != nil { + return fmt.Errorf("failed to replace old cached SSO token file, %v", err) + } + + return nil +} + +func writeCacheFile(filename string, fileMode os.FileMode, t cachedToken) (err error) { + var f *os.File + f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create cached SSO token file %v", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close cached SSO token file, %v", closeErr) + } + }() + + encoder := json.NewEncoder(f) + + if err = encoder.Encode(t); err != nil { + return fmt.Errorf("failed to serialize cached SSO token, %v", err) + } + + return nil +} + +type rfc3339 time.Time + +// UnmarshalJSON decode rfc3339 from JSON format +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + var err error + + if err = json.Unmarshal(bytes, &value); err != nil { + return err + } + + *r, err = parseRFC3339(value) + return err +} + +func parseRFC3339(v string) (rfc3339, error) { + parsed, err := time.Parse(time.RFC3339, v) + if err != nil { + return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + return rfc3339(parsed), nil +} + +// MarshalJSON encode rfc3339 to JSON format time +func (r *rfc3339) MarshalJSON() ([]byte, error) { + value := time.Time(*r).Format(time.RFC3339) + + // Use JSON unmarshal 
to unescape the quoted value making use of JSON's + // quoting rules. + return json.Marshal(value) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go new file mode 100644 index 000000000..7562cd013 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go @@ -0,0 +1,139 @@ +package ssocreds + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" + "github.com/aws/aws-sdk-go/service/ssooidc" +) + +// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API +// client for calling CreateToken operation to refresh the SSO token. +type CreateTokenAPIClient interface { + CreateToken(input *ssooidc.CreateTokenInput) (*ssooidc.CreateTokenOutput, error) +} + +// SSOTokenProviderOptions provides the options for configuring the +// SSOTokenProvider. +type SSOTokenProviderOptions struct { + // Client that can be overridden + Client CreateTokenAPIClient + + // The path to the file containing the cached SSO token that will be read from. + // Initialized from the NewSSOTokenProvider's cachedTokenFilepath parameter. + CachedTokenFilepath string +} + +// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for +// Bearer Authentication. The SSOTokenProvider can only be used to refresh +// already cached SSO Tokens. This utility cannot perform the initial SSO +// create token. +// +// The initial SSO create token should be performed with the AWS CLI before the +// Go application using the SSOTokenProvider will need to retrieve the SSO +// token. If the AWS CLI has not created the token cache file, this provider +// will return an error when attempting to retrieve the cached token. +// +// This provider will attempt to refresh the cached SSO token periodically if +// needed when RetrieveBearerToken is called. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file. +// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +type SSOTokenProvider struct { + options SSOTokenProviderOptions +} + +// NewSSOTokenProvider returns an initialized SSOTokenProvider that will +// periodically refresh the cached SSO token stored in the cachedTokenFilepath. +// The cachedTokenFilepath file's content will be rewritten by the token +// provider when the token is refreshed. +// +// The client must be configured for the AWS region the SSO token was created for. +func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { + options := SSOTokenProviderOptions{ + Client: client, + CachedTokenFilepath: cachedTokenFilepath, + } + for _, fn := range optFns { + fn(&options) + } + + provider := &SSOTokenProvider{ + options: options, + } + + return provider +} + +// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath +// the SSOTokenProvider was created with. If the token has expired +// RetrieveBearerToken will attempt to refresh it. If the token cannot be +// refreshed or is not present an error will be returned. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file.
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +func (p *SSOTokenProvider) RetrieveBearerToken(ctx aws.Context) (bearer.Token, error) { + cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) + if err != nil { + return bearer.Token{}, err + } + + if cachedToken.ExpiresAt != nil && nowTime().After(time.Time(*cachedToken.ExpiresAt)) { + cachedToken, err = p.refreshToken(cachedToken) + if err != nil { + return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %v", err) + } + } + + expiresAt := toTime((*time.Time)(cachedToken.ExpiresAt)) + return bearer.Token{ + Value: cachedToken.AccessToken, + CanExpire: !expiresAt.IsZero(), + Expires: expiresAt, + }, nil +} + +func (p *SSOTokenProvider) refreshToken(token cachedToken) (cachedToken, error) { + if token.ClientSecret == "" || token.ClientID == "" || token.RefreshToken == "" { + return cachedToken{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") + } + + createResult, err := p.options.Client.CreateToken(&ssooidc.CreateTokenInput{ + ClientId: &token.ClientID, + ClientSecret: &token.ClientSecret, + RefreshToken: &token.RefreshToken, + GrantType: aws.String("refresh_token"), + }) + if err != nil { + return cachedToken{}, fmt.Errorf("unable to refresh SSO token, %v", err) + } + + expiresAt := nowTime().Add(time.Duration(*createResult.ExpiresIn) * time.Second) + + token.AccessToken = *createResult.AccessToken + token.ExpiresAt = (*rfc3339)(&expiresAt) + token.RefreshToken = *createResult.RefreshToken + + fileInfo, err := os.Stat(p.options.CachedTokenFilepath) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to stat cached SSO token file %v", err) + } + + if err = storeCachedToken(p.options.CachedTokenFilepath, token, fileInfo.Mode()); err != nil { + return cachedToken{}, fmt.Errorf("unable to cache refreshed SSO token, %v", err) + } + + return token, nil +} + +func toTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 260a37cbb..86db488de 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -9,7 +9,7 @@ to refresh the credentials will be synchronized. But, the SDK is unable to ensure synchronous usage of the AssumeRoleProvider if the value is shared between multiple Credentials, Sessions or service clients. -Assume Role +# Assume Role To assume an IAM role using STS with the SDK you can create a new Credentials with the SDKs's stscreds package. @@ -27,7 +27,7 @@ with the SDKs's stscreds package. // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) -Assume Role with static MFA Token +# Assume Role with static MFA Token To assume an IAM role with a MFA token you can either specify a MFA token code directly or provide a function to prompt the user each time the credentials @@ -49,7 +49,7 @@ credentials. // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) -Assume Role with MFA Token Provider +# Assume Role with MFA Token Provider To assume an IAM role with MFA for longer running tasks where the credentials may need to be refreshed setting the TokenProvider field of AssumeRoleProvider @@ -74,7 +74,6 @@ single Credentials with an AssumeRoleProvider can be shared safely. 
// Create service client value configured for credentials // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) - */ package stscreds @@ -199,6 +198,10 @@ type AssumeRoleProvider struct { // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). SerialNumber *string + // The SourceIdentity which is used to identity a persistent identity through the whole session. + // For more details see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + SourceIdentity *string + // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). If the role being assumed requires MFA and if the TokenCode value @@ -320,6 +323,7 @@ func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (crede Tags: p.Tags, PolicyArns: p.PolicyArns, TransitiveTagKeys: p.TransitiveTagKeys, + SourceIdentity: p.SourceIdentity, } if p.Policy != nil { input.Policy = p.Policy diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index debf74510..aedb4f56a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -13,6 +13,8 @@ const ( AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. + AwsIsoEPartitionID = "aws-iso-e" // AWS ISOE (Europe) partition. + AwsIsoFPartitionID = "aws-iso-f" // AWS ISOF partition. ) // AWS Standard partition's regions. @@ -69,8 +71,14 @@ const ( UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). ) +// AWS ISOE (Europe) partition's regions. +const () + +// AWS ISOF partition's regions. +const () + // DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. // // Use DefaultPartitions() to get the list of the default partitions. func DefaultResolver() Resolver { @@ -78,7 +86,7 @@ func DefaultResolver() Resolver { } // DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. // // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -94,6 +102,8 @@ var defaultPartitions = partitions{ awsusgovPartition, awsisoPartition, awsisobPartition, + awsisoePartition, + awsisofPartition, } // AwsPartition returns the Resolver for AWS Standard. 
@@ -1867,6 +1877,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2074,6 +2087,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3256,6 +3272,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3274,6 +3296,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -3317,12 +3345,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -3356,6 +3390,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -3543,6 +3580,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3576,6 +3622,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, @@ -3591,6 +3643,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, @@ -3606,6 +3664,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -3621,6 +3685,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.api.aws", + }, }, }, "auditmanager": service{ @@ -3860,6 +3930,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3979,15 +4052,84 @@ var awsPartition = partition{ }, "backupstorage": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -4033,6 +4175,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -5230,6 +5375,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -5901,6 +6049,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6041,6 +6192,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -6077,6 +6237,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -6746,12 +6912,42 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + }, }, }, 
"connect-campaigns": service{ @@ -6833,12 +7029,21 @@ var awsPartition = partition{ }, "controltower": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -6848,6 +7053,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6872,6 +7080,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6881,6 +7092,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -6920,6 +7134,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -7464,6 +7696,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7664,6 +7899,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -7679,6 +7920,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -7697,6 +7947,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -7730,6 +7989,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -10724,6 +10989,9 @@ var awsPartition = partition{ }, "emr-serverless": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -10808,6 +11076,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: 
endpoint{}, @@ -11473,6 +11744,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11978,6 +12252,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12960,6 +13237,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -13724,13 +14004,6 @@ var awsPartition = partition{ }, }, "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, Endpoints: serviceEndpoints{ endpointKey{ Region: "ap-east-1", @@ -13778,45 +14051,35 @@ var awsPartition = partition{ Region: "fips-ca-central-1", }: endpoint{ Hostname: "iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ Hostname: "iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-east-2", }: endpoint{ Hostname: "iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-1", }: endpoint{ Hostname: "iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ Hostname: "iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ @@ -14461,12 +14724,140 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "api-ap-southeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "api-ap-southeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "api-eu-central-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "api-eu-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "data-ap-southeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "data-ap-southeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "data-eu-central-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "data-eu-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "fips-api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -14656,9 +15047,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14683,6 +15083,51 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -14695,15 +15140,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"kafka-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + }, }, }, "kafkaconnect": service{ @@ -14778,6 +15247,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -14907,6 +15379,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{ @@ -14957,11 +15435,23 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.us-east-1.api.aws", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{ Hostname: "kendra-ranking.us-east-2.api.aws", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{ @@ -14972,6 +15462,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.us-west-2.api.aws", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-west-2.api.aws", + }, }, }, "kinesis": service{ @@ -15151,6 +15647,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -15624,6 +16123,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17213,6 +17720,9 @@ var awsPartition = partition{ }, "mediaconnect": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -17222,6 +17732,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -17231,6 +17744,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17502,53 +18018,111 @@ var awsPartition = partition{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - 
}: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mediapackage-vod": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediapackage-vod": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediapackagev2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", @@ -17862,6 +18436,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17871,6 +18448,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -17895,6 +18475,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17963,6 +18546,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17972,18 +18558,27 @@ var awsPartition = partition{ endpointKey{ 
Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18413,6 +19008,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18422,18 +19020,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18707,6 +19314,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18716,6 +19326,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18728,12 +19341,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18854,18 +19473,33 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -19336,6 +19970,40 @@ var awsPartition = partition{ }, }, }, + "osis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "outposts": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20203,18 +20871,63 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + 
Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + }, }, }, "projects.iot1click": service{ @@ -21688,16 +22401,6 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-east-1.api.aws", - }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -22128,6 +22831,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -22362,6 +23068,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -23912,6 +24621,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24027,6 +24739,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -24051,6 +24769,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -24672,33 +25393,6 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery.sa-east-1.amazonaws.com", }, - endpointKey{ - Region: "servicediscovery", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-east-1", }: 
endpoint{}, @@ -25007,6 +25701,130 @@ var awsPartition = partition{ }, }, }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + }, + }, + }, "simspaceweaver": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26012,6 +26830,118 @@ var awsPartition = partition{ }, }, }, + "ssm-contacts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", 
+ CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", + }, + }, + }, "ssm-incidents": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26032,6 +26962,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -26047,21 +26983,90 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: 
"us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + }, }, }, "ssm-sap": service{ @@ -26501,15 +27506,6 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27517,12 +28513,21 @@ var awsPartition = partition{ }, "transcribestreaming": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -27680,6 +28685,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -27689,6 +28697,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -27701,12 +28712,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -27910,6 +28927,91 @@ var awsPartition = partition{ }, }, }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: 
endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "voice-chime": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28665,6 +29767,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "fips-me-central-1", }: endpoint{ @@ -29371,6 +30481,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "fips-me-central-1", }: endpoint{ @@ -30087,6 +31205,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "airflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "api.ecr": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30276,6 +31404,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "batch": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30731,6 +31869,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30922,13 +32070,6 @@ var awscnPartition = partition{ }, }, "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -31095,6 +32236,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31446,6 +32597,20 @@ var awscnPartition = partition{ }, }, }, + "savingsplans": service{ + PartitionEndpoint: "aws-cn", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn", + }: endpoint{ + Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31532,6 +32697,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "sms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32426,13 +33601,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, }, }, "applicationinsights": service{ @@ -32547,6 +33754,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -32562,6 +33775,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "athena-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.api.aws", + }, }, }, "autoscaling": service{ @@ -32625,6 +33844,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -33050,6 +34279,15 @@ var awsusgovPartition = partition{ }, "codepipeline": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -33059,6 +34297,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -33233,9 +34480,24 @@ var awsusgovPartition = partition{ }, "connect": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + }, }, }, "controltower": service{ @@ -34753,30 +36015,19 @@ var awsusgovPartition = partition{ }, }, "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, Endpoints: serviceEndpoints{ endpointKey{ Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ @@ -34923,6 +36174,38 @@ var awsusgovPartition = partition{ }, "iottwinmaker": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -34947,10 +36230,56 @@ var awsusgovPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "kendra": service{ @@ -35383,6 +36712,46 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "models.lex": service{ 
Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -36143,9 +37512,35 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, }, }, "runtime.lex": service{ @@ -36453,6 +37848,20 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "savingsplans": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36772,6 +38181,16 @@ var awsusgovPartition = partition{ }, }, }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "sms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36906,14 +38325,14 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, }, }, @@ -37645,6 +39064,15 @@ var awsusgovPartition = partition{ }, "workspaces": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -37654,6 +39082,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -37816,6 +39253,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "autoscaling": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37828,6 +39272,16 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "cloudformation": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37897,6 +39351,16 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "dms": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -38251,6 +39715,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "logs": service{ @@ -38311,6 +39778,28 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + }, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -38350,6 +39839,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "runtime.sagemaker": service{ @@ -38503,6 +39995,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "transcribe": service{ @@ -38972,6 +40467,28 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -39027,6 +40544,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "snowball": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -39138,3 +40662,71 @@ var awsisobPartition = partition{ }, }, } + +// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe). +func AwsIsoEPartition() Partition { + return awsisoePartition.Partition() +} + +var awsisoePartition = partition{ + ID: "aws-iso-e", + Name: "AWS ISOE (Europe)", + DNSSuffix: "cloud.adc-e.uk", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} + +// AwsIsoFPartition returns the Resolver for AWS ISOF. 
+func AwsIsoFPartition() Partition { + return awsisofPartition.Partition() +} + +var awsisofPartition = partition{ + ID: "aws-iso-f", + Name: "AWS ISOF", + DNSSuffix: "csp.hci.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 1d3f4c3ad..504d72685 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/shareddefaults" + "github.com/aws/aws-sdk-go/service/ssooidc" "github.com/aws/aws-sdk-go/service/sts" ) @@ -23,6 +24,10 @@ type CredentialsProviderOptions struct { // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, // such as setting its ExpiryWindow. WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) + + // ProcessProviderOptions configures a ProcessProvider, + // such as setting its Timeout. + ProcessProviderOptions func(*processcreds.ProcessProvider) } func resolveCredentials(cfg *aws.Config, @@ -33,7 +38,7 @@ func resolveCredentials(cfg *aws.Config, switch { case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration + // User explicitly provided a Profile in the session's configuration // so load that profile from shared config first. // Github(aws/aws-sdk-go#2727) return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) @@ -134,7 +139,11 @@ func resolveCredsFromProfile(cfg *aws.Config, case len(sharedCfg.CredentialProcess) != 0: // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + var optFns []func(*processcreds.ProcessProvider) + if sessOpts.CredentialsProviderOptions != nil && sessOpts.CredentialsProviderOptions.ProcessProviderOptions != nil { + optFns = append(optFns, sessOpts.CredentialsProviderOptions.ProcessProviderOptions) + } + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess, optFns...) 
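The ProcessProviderOptions hook added above gives callers the same kind of control over the credential_process provider that WebIdentityRoleProviderOptions already provides for web identity credentials. A minimal sketch of a consumer using it, assuming the existing session.Options plumbing and the ProcessProvider Timeout field referenced in the new comment; the profile name and duration are illustrative:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Build a session whose credential_process provider gets a longer timeout.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
		Profile:           "process-profile", // hypothetical profile that sets credential_process
		CredentialsProviderOptions: &session.CredentialsProviderOptions{
			ProcessProviderOptions: func(p *processcreds.ProcessProvider) {
				p.Timeout = 2 * time.Minute
			},
		},
	}))
	_ = sess
}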
default: // Fallback to default credentials provider, include mock errors for @@ -173,8 +182,25 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req return nil, err } + var optFns []func(provider *ssocreds.Provider) cfgCopy := cfg.Copy() - cfgCopy.Region = &sharedCfg.SSORegion + + if sharedCfg.SSOSession != nil { + cfgCopy.Region = &sharedCfg.SSOSession.SSORegion + cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedCfg.SSOSession.Name) + if err != nil { + return nil, err + } + mySession := Must(NewSession()) + oidcClient := ssooidc.New(mySession, cfgCopy) + tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath) + optFns = append(optFns, func(p *ssocreds.Provider) { + p.TokenProvider = tokenProvider + p.CachedTokenFilepath = cachedPath + }) + } else { + cfgCopy.Region = &sharedCfg.SSORegion + } return ssocreds.NewCredentials( &Session{ @@ -184,6 +210,7 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req sharedCfg.SSOAccountID, sharedCfg.SSORoleName, sharedCfg.SSOStartURL, + optFns..., ), nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index cbccb60bb..8127c99a9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -37,7 +37,7 @@ const ( // ErrSharedConfigSourceCollision will be returned if a section contains both // source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token", nil) // ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment // variables are empty and Environment was set as the credential source diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 424c82b4d..ea3ac0d03 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -26,6 +26,13 @@ const ( roleSessionNameKey = `role_session_name` // optional roleDurationSecondsKey = "duration_seconds" // optional + // Prefix to be used for SSO sections. These are supposed to only exist in + // the shared config file, not the credentials file. + ssoSectionPrefix = `sso-session ` + + // AWS Single Sign-On (AWS SSO) group + ssoSessionNameKey = "sso_session" + // AWS Single Sign-On (AWS SSO) group ssoAccountIDKey = "sso_account_id" ssoRegionKey = "sso_region" @@ -99,6 +106,10 @@ type sharedConfig struct { CredentialProcess string WebIdentityTokenFile string + // SSO session options + SSOSessionName string + SSOSession *ssoSession + SSOAccountID string SSORegion string SSORoleName string @@ -186,6 +197,20 @@ type sharedConfigFile struct { IniData ini.Sections } +// SSOSession provides the shared configuration parameters of the sso-session +// section. 
+type ssoSession struct { + Name string + SSORegion string + SSOStartURL string +} + +func (s *ssoSession) setFromIniSection(section ini.Section) { + updateString(&s.Name, section, ssoSessionNameKey) + updateString(&s.SSORegion, section, ssoRegionKey) + updateString(&s.SSOStartURL, section, ssoStartURL) +} + // loadSharedConfig retrieves the configuration from the list of files using // the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in @@ -266,13 +291,13 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s // profile only have credential provider options. cfg.clearAssumeRoleOptions() } else { - // First time a profile has been seen, It must either be a assume role - // credentials, or SSO. Assert if the credential type requires a role ARN, - // the ARN is also set, or validate that the SSO configuration is complete. + // First time a profile has been seen. Assert if the credential type + // requires a role ARN, the ARN is also set if err := cfg.validateCredentialsConfig(profile); err != nil { return err } } + profiles[profile] = struct{}{} if err := cfg.validateCredentialType(); err != nil { @@ -308,6 +333,30 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s cfg.SourceProfile = srcCfg } + // If the profile contains an SSO session parameter, the session MUST exist + // as a section in the config file. Load the SSO session using the name + // provided. If the session section is not found or incomplete an error + // will be returned. + if cfg.hasSSOTokenProviderConfiguration() { + skippedFiles = 0 + for _, f := range files { + section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) + if ok { + var ssoSession ssoSession + ssoSession.setFromIniSection(section) + ssoSession.Name = cfg.SSOSessionName + cfg.SSOSession = &ssoSession + break + } + skippedFiles++ + } + if skippedFiles == len(files) { + // If all files were skipped because the sso session section is not found, return + // the sso section not found error. 
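The lookup added below resolves the profile's sso_session name against a matching "sso-session " section in the shared config file, and the token-provider validation further down requires that section to carry the SSO region and start URL. A hypothetical ~/.aws/config of the shape this code expects; all names and values are placeholders, and the key names mirror ssoSessionNameKey and the existing sso_* keys:

[profile my-profile]
sso_session    = my-sso
sso_account_id = 111122223333
sso_role_name  = ExampleRole
region         = us-east-1

[sso-session my-sso]
sso_region    = us-east-1
sso_start_url = https://example.awsapps.com/start

If the profile also carries the legacy sso_region or sso_start_url keys, the validation below additionally requires them to match the values in the sso-session section.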
+ return fmt.Errorf("failed to find SSO session section, %v", cfg.SSOSessionName) + } + } + return nil } @@ -363,6 +412,10 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e cfg.S3UsEast1RegionalEndpoint = sre } + // AWS Single Sign-On (AWS SSO) + // SSO session options + updateString(&cfg.SSOSessionName, section, ssoSessionNameKey) + // AWS Single Sign-On (AWS SSO) updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) updateString(&cfg.SSORegion, section, ssoRegionKey) @@ -461,32 +514,20 @@ func (cfg *sharedConfig) validateCredentialType() error { } func (cfg *sharedConfig) validateSSOConfiguration() error { - if !cfg.hasSSOConfiguration() { + if cfg.hasSSOTokenProviderConfiguration() { + err := cfg.validateSSOTokenProviderConfiguration() + if err != nil { + return err + } return nil } - var missing []string - if len(cfg.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } - - if len(cfg.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(cfg.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(cfg.SSOStartURL) == 0 { - missing = append(missing, ssoStartURL) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - cfg.Profile, strings.Join(missing, ", ")) + if cfg.hasLegacySSOConfiguration() { + err := cfg.validateLegacySSOConfiguration() + if err != nil { + return err + } } - return nil } @@ -525,15 +566,76 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() { } func (cfg *sharedConfig) hasSSOConfiguration() bool { - switch { - case len(cfg.SSOAccountID) != 0: - case len(cfg.SSORegion) != 0: - case len(cfg.SSORoleName) != 0: - case len(cfg.SSOStartURL) != 0: - default: - return false + return cfg.hasSSOTokenProviderConfiguration() || cfg.hasLegacySSOConfiguration() +} + +func (c *sharedConfig) hasSSOTokenProviderConfiguration() bool { + return len(c.SSOSessionName) > 0 +} + +func (c *sharedConfig) hasLegacySSOConfiguration() bool { + return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 +} + +func (c *sharedConfig) validateSSOTokenProviderConfiguration() error { + var missing []string + + if len(c.SSOSessionName) == 0 { + missing = append(missing, ssoSessionNameKey) } - return true + + if c.SSOSession == nil { + missing = append(missing, ssoSectionPrefix) + } else { + if len(c.SSOSession.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOSession.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + + if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) + } + + if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURL, c.Profile, ssoStartURL, ssoSectionPrefix) + } + + return nil +} + +func (c *sharedConfig) validateLegacySSOConfiguration() error { + var missing []string + + if len(c.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(c.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(c.SSORoleName) == 
0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + return nil } func oneOrNone(bs ...bool) bool { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 4d78162c0..0240bd0be 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -3,7 +3,7 @@ // Provides request signing for request that need to be signed with // AWS V4 Signatures. // -// Standalone Signer +// # Standalone Signer // // Generally using the signer outside of the SDK should not require any additional // logic when using Go v1.5 or higher. The signer does this by taking advantage @@ -14,10 +14,10 @@ // The signer will first check the URL.Opaque field, and use its value if set. // The signer does require the URL.Opaque field to be set in the form of: // -// "///" +// "///" // -// // e.g. -// "//example.com/some/path" +// // e.g. +// "//example.com/some/path" // // The leading "//" and hostname are required or the URL.Opaque escaping will // not work correctly. @@ -695,7 +695,8 @@ func (ctx *signingCtx) buildBodyDigest() error { includeSHA256Header := ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "s3-object-lambda" || - ctx.ServiceName == "glacier" + ctx.ServiceName == "glacier" || + ctx.ServiceName == "s3-outposts" s3Presign := ctx.isPresign && (ctx.ServiceName == "s3" || diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index f34d39820..096df3984 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.245" +const SDKVersion = "1.44.302" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 1d273ff0e..ecc521f88 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -287,6 +287,10 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) if tag.Get("location") != "header" || tag.Get("enum") == "" { return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) } + if len(value) == 0 { + return "", errValueNotSet + } + buff := &bytes.Buffer{} for i, sv := range value { if sv == nil || len(*sv) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go index 4fffd0427..5366a646d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go @@ -2,6 +2,7 @@ package restjson import ( "bytes" + "encoding/json" "io" "io/ioutil" "net/http" @@ -40,54 +41,30 @@ func (u *UnmarshalTypedError) UnmarshalError( resp *http.Response, respMeta protocol.ResponseMetadata, ) (error, error) { - - code := resp.Header.Get(errorTypeHeader) - msg := resp.Header.Get(errorMessageHeader) - - body := resp.Body - if len(code) == 0 || len(msg) == 0 { - // If unable to get code from HTTP headers have to parse JSON 
message - // to determine what kind of exception this will be. - var buf bytes.Buffer - var jsonErr jsonErrorResponse - teeReader := io.TeeReader(resp.Body, &buf) - err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) - if err != nil { - return nil, err - } - - body = ioutil.NopCloser(&buf) - if len(code) == 0 { - code = jsonErr.Code - } - msg = jsonErr.Message + code, msg, err := unmarshalErrorInfo(resp) + if err != nil { + return nil, err } - // If code has colon separators remove them so can compare against modeled - // exception names. - code = strings.SplitN(code, ":", 2)[0] - - if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value - // for the exception that the JSON body can be unmarshaled into. - v := fn(respMeta) - if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil { - return nil, err - } + fn, ok := u.exceptions[code] + if !ok { + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil + } - if err := rest.UnmarshalResponse(resp, v, true); err != nil { - return nil, err - } + v := fn(respMeta) + if err := jsonutil.UnmarshalJSONCaseInsensitive(v, resp.Body); err != nil { + return nil, err + } - return v, nil + if err := rest.UnmarshalResponse(resp, v, true); err != nil { + return nil, err } - // fallback to unmodeled generic exceptions - return awserr.NewRequestFailure( - awserr.New(code, msg, nil), - respMeta.StatusCode, - respMeta.RequestID, - ), nil + return v, nil } // UnmarshalErrorHandler is a named request handler for unmarshaling restjson @@ -101,36 +78,80 @@ var UnmarshalErrorHandler = request.NamedHandler{ func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - var jsonErr jsonErrorResponse - err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + code, msg, err := unmarshalErrorInfo(r.HTTPResponse) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), + awserr.New(request.ErrCodeSerialization, "failed to unmarshal response error", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - code := r.HTTPResponse.Header.Get(errorTypeHeader) - if code == "" { - code = jsonErr.Code - } - msg := r.HTTPResponse.Header.Get(errorMessageHeader) - if msg == "" { - msg = jsonErr.Message - } - - code = strings.SplitN(code, ":", 2)[0] r.Error = awserr.NewRequestFailure( - awserr.New(code, jsonErr.Message, nil), + awserr.New(code, msg, nil), r.HTTPResponse.StatusCode, r.RequestID, ) } type jsonErrorResponse struct { + Type string `json:"__type"` Code string `json:"code"` Message string `json:"message"` } + +func (j *jsonErrorResponse) SanitizedCode() string { + code := j.Code + if len(j.Type) > 0 { + code = j.Type + } + return sanitizeCode(code) +} + +// Remove superfluous components from a restJson error code. +// - If a : character is present, then take only the contents before the +// first : character in the value. +// - If a # character is present, then take only the contents after the first +// # character in the value. 
+// +// All of the following error values resolve to FooError: +// - FooError +// - FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +// - aws.protocoltests.restjson#FooError +// - aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +func sanitizeCode(code string) string { + noColon := strings.SplitN(code, ":", 2)[0] + hashSplit := strings.SplitN(noColon, "#", 2) + return hashSplit[len(hashSplit)-1] +} + +// attempt to garner error details from the response, preferring header values +// when present +func unmarshalErrorInfo(resp *http.Response) (code string, msg string, err error) { + code = sanitizeCode(resp.Header.Get(errorTypeHeader)) + msg = resp.Header.Get(errorMessageHeader) + if len(code) > 0 && len(msg) > 0 { + return + } + + // a modeled error will have to be re-deserialized later, so the body must + // be preserved + var buf bytes.Buffer + tee := io.TeeReader(resp.Body, &buf) + defer func() { resp.Body = ioutil.NopCloser(&buf) }() + + var jsonErr jsonErrorResponse + if decodeErr := json.NewDecoder(tee).Decode(&jsonErr); decodeErr != nil && decodeErr != io.EOF { + err = awserr.NewUnmarshalError(decodeErr, "failed to decode response body", buf.Bytes()) + return + } + + if len(code) == 0 { + code = jsonErr.SanitizedCode() + } + if len(msg) == 0 { + msg = jsonErr.Message + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 1ed7b2921..216289f5d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -683,15 +683,10 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // see Bring Your Own IP Addresses (BYOIP) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html) // in the Amazon Elastic Compute Cloud User Guide. // -// [EC2-VPC] If you release an Elastic IP address, you might be able to recover -// it. You cannot recover an Elastic IP address that you released after it is -// allocated to another Amazon Web Services account. You cannot recover an Elastic -// IP address for EC2-Classic. To attempt to recover an Elastic IP address that -// you released, specify it in this operation. -// -// An Elastic IP address is for use either in the EC2-Classic platform or in -// a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic -// per Region and 5 Elastic IP addresses for EC2-VPC per Region. +// If you release an Elastic IP address, you might be able to recover it. You +// cannot recover an Elastic IP address that you released after it is allocated +// to another Amazon Web Services account. To attempt to recover an Elastic +// IP address that you released, specify it in this operation. // // For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -700,10 +695,6 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // telecommunication carrier, to a network interface which resides in a subnet // in a Wavelength Zone (for example an EC2 instance). // -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. 
-// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -850,9 +841,12 @@ func (c *EC2) AllocateIpamPoolCidrRequest(input *AllocateIpamPoolCidrInput) (req // AllocateIpamPoolCidr API operation for Amazon Elastic Compute Cloud. // -// Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment -// from an IPAM pool to another IPAM pool or to a resource. For more information, -// see Allocate CIDRs (https://docs.aws.amazon.com/vpc/latest/ipam/allocate-cidrs-ipam.html) +// Allocate a CIDR from an IPAM pool. The Region you use should be the IPAM +// pool locale. The locale is the Amazon Web Services Region where this IPAM +// pool is available for allocations. +// +// In IPAM, an allocation is a CIDR assignment from an IPAM pool to another +// IPAM pool or to a resource. For more information, see Allocate CIDRs (https://docs.aws.amazon.com/vpc/latest/ipam/allocate-cidrs-ipam.html) // in the Amazon VPC IPAM User Guide. // // This action creates an allocation with strong consistency. The returned CIDR @@ -1270,23 +1264,11 @@ func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *reques // are in subnets in Wavelength Zones) with an instance or a network interface. // Before you can use an Elastic IP address, you must allocate it to your account. // -// An Elastic IP address is for use in either the EC2-Classic platform or in -// a VPC. For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) -// in the Amazon Elastic Compute Cloud User Guide. -// -// [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is -// already associated with a different instance, it is disassociated from that -// instance and associated with the specified instance. If you associate an -// Elastic IP address with an instance that has an existing Elastic IP address, -// the existing address is disassociated from the instance, but remains allocated -// to your account. -// -// [VPC in an EC2-Classic account] If you don't specify a private IP address, -// the Elastic IP address is associated with the primary IP address. If the -// Elastic IP address is already associated with a different instance or a network -// interface, you get an error unless you allow reassociation. You cannot associate -// an Elastic IP address with an instance or network interface that has an existing -// Elastic IP address. +// If the Elastic IP address is already associated with a different instance, +// it is disassociated from that instance and associated with the specified +// instance. If you associate an Elastic IP address with an instance that has +// an existing Elastic IP address, the existing address is disassociated from +// the instance, but remains allocated to your account. // // [Subnets in Wavelength Zones] You can associate an IP address from the telecommunication // carrier to the instance or network interface. @@ -1299,10 +1281,6 @@ func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *reques // the Elastic IP address is remapped to the same instance. For more information, // see the Elastic IP Addresses section of Amazon EC2 Pricing (http://aws.amazon.com/ec2/pricing/). // -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. 
For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2725,9 +2703,8 @@ func (c *EC2) AttachVerifiedAccessTrustProviderRequest(input *AttachVerifiedAcce // AttachVerifiedAccessTrustProvider API operation for Amazon Elastic Compute Cloud. // -// A trust provider is a third-party entity that creates, maintains, and manages -// identity information for users and devices. One or more trust providers can -// be attached to an Amazon Web Services Verified Access instance. +// Attaches the specified Amazon Web Services Verified Access trust provider +// to the specified Amazon Web Services Verified Access instance. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5416,10 +5393,12 @@ func (c *EC2) CreateFleetRequest(input *CreateFleetInput) (req *request.Request, // CreateFleet API operation for Amazon Elastic Compute Cloud. // -// Launches an EC2 Fleet. +// Creates an EC2 Fleet that contains the configuration information for On-Demand +// Instances and Spot Instances. Instances are launched immediately if there +// is available capacity. // -// You can create a single EC2 Fleet that includes multiple launch specifications -// that vary by instance type, AMI, Availability Zone, or subnet. +// A single EC2 Fleet can include multiple launch specifications that vary by +// instance type, AMI, Availability Zone, or subnet. // // For more information, see EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html) // in the Amazon EC2 User Guide. @@ -5714,6 +5693,85 @@ func (c *EC2) CreateImageWithContext(ctx aws.Context, input *CreateImageInput, o return out, req.Send() } +const opCreateInstanceConnectEndpoint = "CreateInstanceConnectEndpoint" + +// CreateInstanceConnectEndpointRequest generates a "aws/request.Request" representing the +// client's request for the CreateInstanceConnectEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateInstanceConnectEndpoint for more information on using the CreateInstanceConnectEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateInstanceConnectEndpointRequest method. 
+// req, resp := client.CreateInstanceConnectEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInstanceConnectEndpoint +func (c *EC2) CreateInstanceConnectEndpointRequest(input *CreateInstanceConnectEndpointInput) (req *request.Request, output *CreateInstanceConnectEndpointOutput) { + op := &request.Operation{ + Name: opCreateInstanceConnectEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceConnectEndpointInput{} + } + + output = &CreateInstanceConnectEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateInstanceConnectEndpoint API operation for Amazon Elastic Compute Cloud. +// +// Creates an EC2 Instance Connect Endpoint. +// +// An EC2 Instance Connect Endpoint allows you to connect to a resource, without +// requiring the resource to have a public IPv4 address. For more information, +// see Connect to your resources without requiring a public IPv4 address using +// EC2 Instance Connect Endpoint (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Connect-using-EC2-Instance-Connect-Endpoint.html) +// in the Amazon EC2 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateInstanceConnectEndpoint for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInstanceConnectEndpoint +func (c *EC2) CreateInstanceConnectEndpoint(input *CreateInstanceConnectEndpointInput) (*CreateInstanceConnectEndpointOutput, error) { + req, out := c.CreateInstanceConnectEndpointRequest(input) + return out, req.Send() +} + +// CreateInstanceConnectEndpointWithContext is the same as CreateInstanceConnectEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See CreateInstanceConnectEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateInstanceConnectEndpointWithContext(ctx aws.Context, input *CreateInstanceConnectEndpointInput, opts ...request.Option) (*CreateInstanceConnectEndpointOutput, error) { + req, out := c.CreateInstanceConnectEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateInstanceEventWindow = "CreateInstanceEventWindow" // CreateInstanceEventWindowRequest generates a "aws/request.Request" representing the @@ -10070,10 +10128,9 @@ func (c *EC2) CreateVerifiedAccessGroupRequest(input *CreateVerifiedAccessGroupI // // An Amazon Web Services Verified Access group is a collection of Amazon Web // Services Verified Access endpoints who's associated applications have similar -// security requirements. Each instance within an Amazon Web Services Verified -// Access group shares an Amazon Web Services Verified Access policy. 
For example, -// you can group all Amazon Web Services Verified Access instances associated -// with “sales” applications together and use one common Amazon Web Services +// security requirements. Each instance within a Verified Access group shares +// an Verified Access policy. For example, you can group all Verified Access +// instances associated with "sales" applications together and use one common // Verified Access policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10224,9 +10281,8 @@ func (c *EC2) CreateVerifiedAccessTrustProviderRequest(input *CreateVerifiedAcce // // A trust provider is a third-party entity that creates, maintains, and manages // identity information for users and devices. When an application request is -// made, the identity information sent by the trust provider will be evaluated -// by Amazon Web Services Verified Access, before allowing or denying the application -// request. +// made, the identity information sent by the trust provider is evaluated by +// Verified Access before allowing or denying the application request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11861,6 +11917,79 @@ func (c *EC2) DeleteFpgaImageWithContext(ctx aws.Context, input *DeleteFpgaImage return out, req.Send() } +const opDeleteInstanceConnectEndpoint = "DeleteInstanceConnectEndpoint" + +// DeleteInstanceConnectEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInstanceConnectEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteInstanceConnectEndpoint for more information on using the DeleteInstanceConnectEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteInstanceConnectEndpointRequest method. +// req, resp := client.DeleteInstanceConnectEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInstanceConnectEndpoint +func (c *EC2) DeleteInstanceConnectEndpointRequest(input *DeleteInstanceConnectEndpointInput) (req *request.Request, output *DeleteInstanceConnectEndpointOutput) { + op := &request.Operation{ + Name: opDeleteInstanceConnectEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstanceConnectEndpointInput{} + } + + output = &DeleteInstanceConnectEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteInstanceConnectEndpoint API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified EC2 Instance Connect Endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteInstanceConnectEndpoint for usage and error information. 
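// [Editorial sketch — not generated SDK text] A minimal usage example for the
// new EC2 Instance Connect Endpoint operations documented above, assuming a
// default-configured session, a placeholder subnet ID, code running inside a
// function that returns error, and imports of "fmt" plus the standard
// github.com/aws/aws-sdk-go/aws, .../aws/session, and .../service/ec2 packages.
// The DeleteInstanceConnectEndpointInput field name (InstanceConnectEndpointId)
// and the InstanceConnectEndpointId field on the returned endpoint are assumed,
// since they do not appear in this excerpt.
//
//	sess := session.Must(session.NewSession())
//	svc := ec2.New(sess)
//
//	// Create an endpoint in a subnet (placeholder ID), preserving the client IP.
//	createResp, err := svc.CreateInstanceConnectEndpoint(&ec2.CreateInstanceConnectEndpointInput{
//	    SubnetId:         aws.String("subnet-0123456789abcdef0"),
//	    PreserveClientIp: aws.Bool(true),
//	})
//	if err != nil {
//	    return err
//	}
//	fmt.Println(createResp.InstanceConnectEndpoint)
//
//	// Later, delete the endpoint by its ID, passing a context for cancellation.
//	_, err = svc.DeleteInstanceConnectEndpointWithContext(aws.BackgroundContext(),
//	    &ec2.DeleteInstanceConnectEndpointInput{
//	        InstanceConnectEndpointId: createResp.InstanceConnectEndpoint.InstanceConnectEndpointId,
//	    })
//	if err != nil {
//	    return err
//	}
//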
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInstanceConnectEndpoint +func (c *EC2) DeleteInstanceConnectEndpoint(input *DeleteInstanceConnectEndpointInput) (*DeleteInstanceConnectEndpointOutput, error) { + req, out := c.DeleteInstanceConnectEndpointRequest(input) + return out, req.Send() +} + +// DeleteInstanceConnectEndpointWithContext is the same as DeleteInstanceConnectEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteInstanceConnectEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteInstanceConnectEndpointWithContext(ctx aws.Context, input *DeleteInstanceConnectEndpointInput, opts ...request.Option) (*DeleteInstanceConnectEndpointOutput, error) { + req, out := c.DeleteInstanceConnectEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteInstanceEventWindow = "DeleteInstanceEventWindow" // DeleteInstanceEventWindowRequest generates a "aws/request.Request" representing the @@ -17114,9 +17243,6 @@ func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesI // Describes attributes of your Amazon Web Services account. The following are // the supported account attributes: // -// - supported-platforms: Indicates whether your account can launch instances -// into EC2-Classic and EC2-VPC, or only into EC2-VPC. -// // - default-vpc: The ID of the default VPC for your account, or none. // // - max-instances: This attribute is no longer supported. The returned value @@ -17124,19 +17250,16 @@ func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesI // For more information, see On-Demand Instance Limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html#ec2-on-demand-instances-limits) // in the Amazon Elastic Compute Cloud User Guide. // -// - vpc-max-security-groups-per-interface: The maximum number of security -// groups that you can assign to a network interface. -// // - max-elastic-ips: The maximum number of Elastic IP addresses that you -// can allocate for use with EC2-Classic. +// can allocate. +// +// - supported-platforms: This attribute is deprecated. // // - vpc-max-elastic-ips: The maximum number of Elastic IP addresses that -// you can allocate for use with EC2-VPC. +// you can allocate. // -// We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate -// from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic -// to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon EC2 User Guide. +// - vpc-max-security-groups-per-interface: The maximum number of security +// groups that you can assign to a network interface. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17219,6 +17342,15 @@ func (c *EC2) DescribeAddressTransfersRequest(input *DescribeAddressTransfersInp // Elastic IP addresses (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#transfer-EIPs-intro) // in the Amazon Virtual Private Cloud User Guide. 
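// [Editorial sketch — not generated SDK text] A short example of how a source
// account might list its pending Elastic IP address transfers with this
// operation, as described by the handshake behavior in the paragraph that
// follows. It assumes an existing *ec2.EC2 client (svc); the AllocationIds
// input field and the placeholder allocation ID are assumptions, since the
// input shape is not shown in this excerpt.
//
//	resp, err := svc.DescribeAddressTransfers(&ec2.DescribeAddressTransfersInput{
//	    AllocationIds: []*string{aws.String("eipalloc-0123456789abcdef0")},
//	})
//	if err == nil { // resp lists the pending (and recently accepted) transfers
//	    fmt.Println(resp)
//	}
//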
// +// When you transfer an Elastic IP address, there is a two-step handshake between +// the source and transfer Amazon Web Services accounts. When the source account +// starts the transfer, the transfer account has seven days to accept the Elastic +// IP address transfer. During those seven days, the source account can view +// the pending transfer by using this action. After seven days, the transfer +// expires and ownership of the Elastic IP address returns to the source account. +// Accepted transfers are visible to the source account for three days after +// the transfers have been accepted. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -17343,14 +17475,6 @@ func (c *EC2) DescribeAddressesRequest(input *DescribeAddressesInput) (req *requ // // Describes the specified Elastic IP addresses or all of your Elastic IP addresses. // -// An Elastic IP address is for use in either the EC2-Classic platform or in -// a VPC. For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) -// in the Amazon Elastic Compute Cloud User Guide. -// -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -22176,6 +22300,137 @@ func (c *EC2) DescribeInstanceAttributeWithContext(ctx aws.Context, input *Descr return out, req.Send() } +const opDescribeInstanceConnectEndpoints = "DescribeInstanceConnectEndpoints" + +// DescribeInstanceConnectEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceConnectEndpoints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceConnectEndpoints for more information on using the DescribeInstanceConnectEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeInstanceConnectEndpointsRequest method. 
+// req, resp := client.DescribeInstanceConnectEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceConnectEndpoints +func (c *EC2) DescribeInstanceConnectEndpointsRequest(input *DescribeInstanceConnectEndpointsInput) (req *request.Request, output *DescribeInstanceConnectEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeInstanceConnectEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceConnectEndpointsInput{} + } + + output = &DescribeInstanceConnectEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceConnectEndpoints API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified EC2 Instance Connect Endpoints or all EC2 Instance +// Connect Endpoints. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstanceConnectEndpoints for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceConnectEndpoints +func (c *EC2) DescribeInstanceConnectEndpoints(input *DescribeInstanceConnectEndpointsInput) (*DescribeInstanceConnectEndpointsOutput, error) { + req, out := c.DescribeInstanceConnectEndpointsRequest(input) + return out, req.Send() +} + +// DescribeInstanceConnectEndpointsWithContext is the same as DescribeInstanceConnectEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceConnectEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceConnectEndpointsWithContext(ctx aws.Context, input *DescribeInstanceConnectEndpointsInput, opts ...request.Option) (*DescribeInstanceConnectEndpointsOutput, error) { + req, out := c.DescribeInstanceConnectEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstanceConnectEndpointsPages iterates over the pages of a DescribeInstanceConnectEndpoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceConnectEndpoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceConnectEndpoints operation. 
+// pageNum := 0 +// err := client.DescribeInstanceConnectEndpointsPages(params, +// func(page *ec2.DescribeInstanceConnectEndpointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *EC2) DescribeInstanceConnectEndpointsPages(input *DescribeInstanceConnectEndpointsInput, fn func(*DescribeInstanceConnectEndpointsOutput, bool) bool) error { + return c.DescribeInstanceConnectEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceConnectEndpointsPagesWithContext same as DescribeInstanceConnectEndpointsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceConnectEndpointsPagesWithContext(ctx aws.Context, input *DescribeInstanceConnectEndpointsInput, fn func(*DescribeInstanceConnectEndpointsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceConnectEndpointsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceConnectEndpointsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceConnectEndpointsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeInstanceCreditSpecifications = "DescribeInstanceCreditSpecifications" // DescribeInstanceCreditSpecificationsRequest generates a "aws/request.Request" representing the @@ -25328,9 +25583,11 @@ func (c *EC2) DescribeMovingAddressesRequest(input *DescribeMovingAddressesInput // DescribeMovingAddresses API operation for Amazon Elastic Compute Cloud. // -// Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, -// or that are being restored to the EC2-Classic platform. This request does -// not return information about any other Elastic IP addresses in your account. +// This action is deprecated. +// +// Describes your Elastic IP addresses that are being moved from or being restored +// to the EC2-Classic platform. This request does not return information about +// any other Elastic IP addresses in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -31874,7 +32131,7 @@ func (c *EC2) DescribeVerifiedAccessEndpointsRequest(input *DescribeVerifiedAcce // DescribeVerifiedAccessEndpoints API operation for Amazon Elastic Compute Cloud. // -// Describe Amazon Web Services Verified Access endpoints. +// Describes the specified Amazon Web Services Verified Access endpoints. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -32004,7 +32261,7 @@ func (c *EC2) DescribeVerifiedAccessGroupsRequest(input *DescribeVerifiedAccessG // DescribeVerifiedAccessGroups API operation for Amazon Elastic Compute Cloud. // -// Describe details of existing Verified Access groups. +// Describes the specified Verified Access groups. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -32134,8 +32391,7 @@ func (c *EC2) DescribeVerifiedAccessInstanceLoggingConfigurationsRequest(input * // DescribeVerifiedAccessInstanceLoggingConfigurations API operation for Amazon Elastic Compute Cloud. // -// Describes the current logging configuration for the Amazon Web Services Verified -// Access instances. +// Describes the specified Amazon Web Services Verified Access instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -32265,7 +32521,7 @@ func (c *EC2) DescribeVerifiedAccessInstancesRequest(input *DescribeVerifiedAcce // DescribeVerifiedAccessInstances API operation for Amazon Elastic Compute Cloud. // -// Describe Verified Access instances. +// Describes the specified Amazon Web Services Verified Access instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -32395,7 +32651,7 @@ func (c *EC2) DescribeVerifiedAccessTrustProvidersRequest(input *DescribeVerifie // DescribeVerifiedAccessTrustProviders API operation for Amazon Elastic Compute Cloud. // -// Describe details of existing Verified Access trust providers. +// Describes the specified Amazon Web Services Verified Access trust providers. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -34704,7 +34960,8 @@ func (c *EC2) DetachVerifiedAccessTrustProviderRequest(input *DetachVerifiedAcce // DetachVerifiedAccessTrustProvider API operation for Amazon Elastic Compute Cloud. // -// Detach a trust provider from an Amazon Web Services Verified Access instance. +// Detaches the specified Amazon Web Services Verified Access trust provider +// from the specified Amazon Web Services Verified Access instance. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -35868,14 +36125,6 @@ func (c *EC2) DisassociateAddressRequest(input *DisassociateAddressInput) (req * // Disassociates an Elastic IP address from the instance or network interface // it's associated with. // -// An Elastic IP address is for use in either the EC2-Classic platform or in -// a VPC. For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) -// in the Amazon Elastic Compute Cloud User Guide. -// -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. -// // This is an idempotent operation. If you perform the operation more than once, // Amazon EC2 doesn't return an error. // @@ -40167,7 +40416,9 @@ func (c *EC2) GetIpamPoolAllocationsRequest(input *GetIpamPoolAllocationsInput) // GetIpamPoolAllocations API operation for Amazon Elastic Compute Cloud. // -// Get a list of all the CIDR allocations in an IPAM pool. +// Get a list of all the CIDR allocations in an IPAM pool. The Region you use +// should be the IPAM pool locale. 
The locale is the Amazon Web Services Region +// where this IPAM pool is available for allocations. // // If you use this action after AllocateIpamPoolCidr (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AllocateIpamPoolCidr.html) // or ReleaseIpamPoolAllocation (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ReleaseIpamPoolAllocation.html), @@ -40888,6 +41139,12 @@ func (c *EC2) GetNetworkInsightsAccessScopeAnalysisFindingsRequest(input *GetNet Name: opGetNetworkInsightsAccessScopeAnalysisFindings, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -40931,6 +41188,57 @@ func (c *EC2) GetNetworkInsightsAccessScopeAnalysisFindingsWithContext(ctx aws.C return out, req.Send() } +// GetNetworkInsightsAccessScopeAnalysisFindingsPages iterates over the pages of a GetNetworkInsightsAccessScopeAnalysisFindings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetNetworkInsightsAccessScopeAnalysisFindings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetNetworkInsightsAccessScopeAnalysisFindings operation. +// pageNum := 0 +// err := client.GetNetworkInsightsAccessScopeAnalysisFindingsPages(params, +// func(page *ec2.GetNetworkInsightsAccessScopeAnalysisFindingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *EC2) GetNetworkInsightsAccessScopeAnalysisFindingsPages(input *GetNetworkInsightsAccessScopeAnalysisFindingsInput, fn func(*GetNetworkInsightsAccessScopeAnalysisFindingsOutput, bool) bool) error { + return c.GetNetworkInsightsAccessScopeAnalysisFindingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetNetworkInsightsAccessScopeAnalysisFindingsPagesWithContext same as GetNetworkInsightsAccessScopeAnalysisFindingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetNetworkInsightsAccessScopeAnalysisFindingsPagesWithContext(ctx aws.Context, input *GetNetworkInsightsAccessScopeAnalysisFindingsInput, fn func(*GetNetworkInsightsAccessScopeAnalysisFindingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetNetworkInsightsAccessScopeAnalysisFindingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetNetworkInsightsAccessScopeAnalysisFindingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetNetworkInsightsAccessScopeAnalysisFindingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetNetworkInsightsAccessScopeContent = "GetNetworkInsightsAccessScopeContent" // GetNetworkInsightsAccessScopeContentRequest generates a "aws/request.Request" representing the @@ -44638,10 +44946,10 @@ func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput // only one attribute at a time. // // Note: Using this action to change the security groups associated with an -// elastic network interface (ENI) attached to an instance in a VPC can result -// in an error if the instance has more than one ENI. To change the security -// groups associated with an ENI attached to an instance that has multiple ENIs, -// we recommend that you use the ModifyNetworkInterfaceAttribute action. +// elastic network interface (ENI) attached to an instance can result in an +// error if the instance has more than one ENI. To change the security groups +// associated with an ENI attached to an instance that has multiple ENIs, we +// recommend that you use the ModifyNetworkInterfaceAttribute action. // // To modify some attributes, the instance must be stopped. For more information, // see Modify a stopped instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) @@ -46040,10 +46348,6 @@ func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput // For more information, see Modifying Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) // in the Amazon EC2 User Guide. // -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -46994,7 +47298,8 @@ func (c *EC2) ModifyVerifiedAccessEndpointRequest(input *ModifyVerifiedAccessEnd // ModifyVerifiedAccessEndpoint API operation for Amazon Elastic Compute Cloud. // -// Modifies the configuration of an Amazon Web Services Verified Access endpoint. +// Modifies the configuration of the specified Amazon Web Services Verified +// Access endpoint. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -47067,7 +47372,7 @@ func (c *EC2) ModifyVerifiedAccessEndpointPolicyRequest(input *ModifyVerifiedAcc // ModifyVerifiedAccessEndpointPolicy API operation for Amazon Elastic Compute Cloud. // -// Modifies the specified Verified Access endpoint policy. +// Modifies the specified Amazon Web Services Verified Access endpoint policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -47140,7 +47445,7 @@ func (c *EC2) ModifyVerifiedAccessGroupRequest(input *ModifyVerifiedAccessGroupI // ModifyVerifiedAccessGroup API operation for Amazon Elastic Compute Cloud. // -// Modifies the specified Verified Access group configuration. +// Modifies the specified Amazon Web Services Verified Access group configuration. 
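// [Editorial sketch — not generated SDK text] A brief illustration of the
// awserr.Error runtime type-assertion pattern referred to in the sentence that
// follows (and throughout these operation comments). It assumes an existing
// *ec2.EC2 client (svc), a populated input value, and imports of "fmt" and
// github.com/aws/aws-sdk-go/aws/awserr.
//
//	out, err := svc.ModifyVerifiedAccessGroup(input)
//	if err != nil {
//	    if aerr, ok := err.(awserr.Error); ok {
//	        // Service API or SDK error: inspect the code and message.
//	        fmt.Println(aerr.Code(), aerr.Message())
//	    } else {
//	        // Non-AWS error (for example, a serialization failure).
//	        fmt.Println(err)
//	    }
//	    return
//	}
//	fmt.Println(out)
//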
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -47213,7 +47518,7 @@ func (c *EC2) ModifyVerifiedAccessGroupPolicyRequest(input *ModifyVerifiedAccess // ModifyVerifiedAccessGroupPolicy API operation for Amazon Elastic Compute Cloud. // -// Modifies the specified Verified Access group policy. +// Modifies the specified Amazon Web Services Verified Access group policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -47286,7 +47591,8 @@ func (c *EC2) ModifyVerifiedAccessInstanceRequest(input *ModifyVerifiedAccessIns // ModifyVerifiedAccessInstance API operation for Amazon Elastic Compute Cloud. // -// Modifies the configuration of the specified Verified Access instance. +// Modifies the configuration of the specified Amazon Web Services Verified +// Access instance. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -48748,6 +49054,8 @@ func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *reques // MoveAddressToVpc API operation for Amazon Elastic Compute Cloud. // +// This action is deprecated. +// // Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC // platform. The Elastic IP address must be allocated to your account for more // than 24 hours, and it must not be associated with an instance. After the @@ -48756,10 +49064,6 @@ func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *reques // You cannot move an Elastic IP address that was originally allocated for use // in the EC2-VPC platform to the EC2-Classic platform. // -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -49243,10 +49547,6 @@ func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedIn // and Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) // in the Amazon EC2 User Guide. // -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -50226,13 +50526,9 @@ func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Re // // Releases the specified Elastic IP address. // -// [EC2-Classic, default VPC] Releasing an Elastic IP address automatically -// disassociates it from any instance that it's associated with. To disassociate -// an Elastic IP address without releasing it, use DisassociateAddress. -// -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. 
For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. +// [Default VPC] Releasing an Elastic IP address automatically disassociates +// it from any instance that it's associated with. To disassociate an Elastic +// IP address without releasing it, use DisassociateAddress. // // [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic // IP address before you can release it. Otherwise, Amazon EC2 returns an error @@ -50244,11 +50540,8 @@ func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Re // already released, you'll get an AuthFailure error if the address is already // allocated to another Amazon Web Services account. // -// [EC2-VPC] After you release an Elastic IP address for use in a VPC, you might -// be able to recover it. For more information, see AllocateAddress. -// -// For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) -// in the Amazon Elastic Compute Cloud User Guide. +// After you release an Elastic IP address, you might be able to recover it. +// For more information, see AllocateAddress. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -50404,9 +50697,11 @@ func (c *EC2) ReleaseIpamPoolAllocationRequest(input *ReleaseIpamPoolAllocationI // ReleaseIpamPoolAllocation API operation for Amazon Elastic Compute Cloud. // -// Release an allocation within an IPAM pool. You can only use this action to -// release manual allocations. To remove an allocation for a resource without -// deleting the resource, set its monitored state to false using ModifyIpamResourceCidr +// Release an allocation within an IPAM pool. The Region you use should be the +// IPAM pool locale. The locale is the Amazon Web Services Region where this +// IPAM pool is available for allocations. You can only use this action to release +// manual allocations. To remove an allocation for a resource without deleting +// the resource, set its monitored state to false using ModifyIpamResourceCidr // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyIpamResourceCidr.html). // For more information, see Release an allocation (https://docs.aws.amazon.com/vpc/latest/ipam/release-pool-alloc-ipam.html) // in the Amazon VPC IPAM User Guide. @@ -51217,10 +51512,6 @@ func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req // see Which is the best Spot request method to use? (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-best-practices.html#which-spot-request-method-to-use) // in the Amazon EC2 User Guide for Linux Instances. // -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon EC2 User Guide for Linux Instances. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -51829,15 +52120,13 @@ func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput // RestoreAddressToClassic API operation for Amazon Elastic Compute Cloud. // +// This action is deprecated. 
+// // Restores an Elastic IP address that was previously moved to the EC2-VPC platform // back to the EC2-Classic platform. You cannot move an Elastic IP address that // was originally allocated for use in EC2-VPC. The Elastic IP address must // not be associated with an instance or network interface. // -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -52482,20 +52771,13 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // You can specify a number of options, or leave the default options. The following // rules apply: // -// - [EC2-VPC] If you don't specify a subnet ID, we choose a default subnet -// from your default VPC for you. If you don't have a default VPC, you must -// specify a subnet ID in the request. +// - If you don't specify a subnet ID, we choose a default subnet from your +// default VPC for you. If you don't have a default VPC, you must specify +// a subnet ID in the request. // -// - [EC2-Classic] If don't specify an Availability Zone, we choose one for -// you. -// -// - Some instance types must be launched into a VPC. If you do not have -// a default VPC, or if you do not specify a subnet ID, the request fails. -// For more information, see Instance types available only in a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). -// -// - [EC2-VPC] All instances have a network interface with a primary private -// IPv4 address. If you don't specify this address, we choose one from the -// IPv4 range of your subnet. +// - All instances have a network interface with a primary private IPv4 address. +// If you don't specify this address, we choose one from the IPv4 range of +// your subnet. // // - Not all instance types support IPv6 addresses. For more information, // see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). @@ -52529,10 +52811,6 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html), // and Troubleshooting connecting to your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html). // -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon EC2 User Guide. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -53440,8 +53718,8 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // in the Amazon EC2 User Guide. 
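// [Editorial sketch — not generated SDK text] A minimal example of stopping an
// instance with hibernation, per the hibernate-on-stop behavior described in
// the paragraph that follows. The instance ID is a placeholder, svc is an
// existing *ec2.EC2 client, and the Hibernate field on StopInstancesInput is
// assumed here, since it does not appear in this excerpt.
//
//	resp, err := svc.StopInstances(&ec2.StopInstancesInput{
//	    InstanceIds: []*string{aws.String("i-0123456789abcdef0")},
//	    Hibernate:   aws.Bool(true), // only valid if hibernation is enabled and prerequisites are met
//	})
//	if err == nil { // resp is now filled
//	    fmt.Println(resp)
//	}
//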
// // You can use the Stop action to hibernate an instance if the instance is enabled -// for hibernation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#enabling-hibernation) -// and it meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). +// for hibernation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enabling-hibernation.html) +// and it meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html). // For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon EC2 User Guide. // @@ -55650,11 +55928,10 @@ func (s *AdditionalDetail) SetVpcEndpointService(v *AnalysisComponent) *Addition type Address struct { _ struct{} `type:"structure"` - // The ID representing the allocation of the address for use with EC2-VPC. + // The ID representing the allocation of the address. AllocationId *string `locationName:"allocationId" type:"string"` - // The ID representing the association of the address with an instance in a - // VPC. + // The ID representing the association of the address with an instance. AssociationId *string `locationName:"associationId" type:"string"` // The carrier IP address associated. This option is only available for network @@ -55668,8 +55945,7 @@ type Address struct { // The ID of the customer-owned address pool. CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"` - // Indicates whether this Elastic IP address is for use with instances in EC2-Classic - // (standard) or instances in a VPC (vpc). + // The network (vpc). Domain *string `locationName:"domain" type:"string" enum:"DomainType"` // The ID of the instance that the address is associated with (if any). @@ -56034,8 +56310,7 @@ func (s *AdvertiseByoipCidrOutput) SetByoipCidr(v *ByoipCidr) *AdvertiseByoipCid type AllocateAddressInput struct { _ struct{} `type:"structure"` - // [EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address - // pool. + // The Elastic IP address to recover or an IPv4 address from an address pool. Address *string `type:"string"` // The ID of a customer-owned address pool. Use this parameter to let Amazon @@ -56043,11 +56318,7 @@ type AllocateAddressInput struct { // address from the address pool. CustomerOwnedIpv4Pool *string `type:"string"` - // Indicates whether the Elastic IP address is for use with instances in a VPC - // or instances in EC2-Classic. - // - // Default: If the Region supports EC2-Classic, the default is standard. Otherwise, - // the default is vpc. + // The network (vpc). Domain *string `type:"string" enum:"DomainType"` // Checks whether you have the required permissions for the action, without @@ -56140,12 +56411,11 @@ func (s *AllocateAddressInput) SetTagSpecifications(v []*TagSpecification) *Allo type AllocateAddressOutput struct { _ struct{} `type:"structure"` - // [EC2-VPC] The ID that Amazon Web Services assigns to represent the allocation - // of the Elastic IP address for use with instances in a VPC. + // The ID that represents the allocation of the Elastic IP address. AllocationId *string `locationName:"allocationId" type:"string"` // The carrier IP address. This option is only available for network interfaces - // which reside in a subnet in a Wavelength Zone (for example an EC2 instance). + // that reside in a subnet in a Wavelength Zone. 
CarrierIp *string `locationName:"carrierIp" type:"string"` // The customer-owned IP address. @@ -56154,8 +56424,7 @@ type AllocateAddressOutput struct { // The ID of the customer-owned address pool. CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"` - // Indicates whether the Elastic IP address is for use with instances in a VPC - // (vpc) or instances in EC2-Classic (standard). + // The network (vpc). Domain *string `locationName:"domain" type:"string" enum:"DomainType"` // The set of Availability Zones, Local Zones, or Wavelength Zones from which @@ -56238,6 +56507,19 @@ func (s *AllocateAddressOutput) SetPublicIpv4Pool(v string) *AllocateAddressOutp type AllocateHostsInput struct { _ struct{} `type:"structure"` + // The IDs of the Outpost hardware assets on which to allocate the Dedicated + // Hosts. Targeting specific hardware assets on an Outpost can help to minimize + // latency between your workloads. This parameter is supported only if you specify + // OutpostArn. If you are allocating the Dedicated Hosts in a Region, omit this + // parameter. + // + // * If you specify this parameter, you can omit Quantity. In this case, + // Amazon EC2 allocates a Dedicated Host on each specified hardware asset. + // + // * If you specify both AssetIds and Quantity, then the value for Quantity + // must be equal to the number of asset IDs specified. + AssetIds []*string `locationName:"AssetId" type:"list"` + // Indicates whether the host accepts any untargeted instance launches that // match its instance type configuration, or if it only accepts Host tenancy // instance launches that specify its unique host ID. For more information, @@ -56288,13 +56570,19 @@ type AllocateHostsInput struct { InstanceType *string `locationName:"instanceType" type:"string"` // The Amazon Resource Name (ARN) of the Amazon Web Services Outpost on which - // to allocate the Dedicated Host. + // to allocate the Dedicated Host. If you specify OutpostArn, you can optionally + // specify AssetIds. + // + // If you are allocating the Dedicated Host in a Region, omit this parameter. OutpostArn *string `type:"string"` // The number of Dedicated Hosts to allocate to your account with these parameters. - // - // Quantity is a required field - Quantity *int64 `locationName:"quantity" type:"integer" required:"true"` + // If you are allocating the Dedicated Hosts on an Outpost, and you specify + // AssetIds, you can omit this parameter. In this case, Amazon EC2 allocates + // a Dedicated Host on each specified hardware asset. If you specify both AssetIds + // and Quantity, then the value that you specify for Quantity must be equal + // to the number of asset IDs specified. + Quantity *int64 `locationName:"quantity" type:"integer"` // The tags to apply to the Dedicated Host during creation. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` @@ -56324,9 +56612,6 @@ func (s *AllocateHostsInput) Validate() error { if s.AvailabilityZone == nil { invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) } - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) - } if invalidParams.Len() > 0 { return invalidParams @@ -56334,6 +56619,12 @@ func (s *AllocateHostsInput) Validate() error { return nil } +// SetAssetIds sets the AssetIds field's value. +func (s *AllocateHostsInput) SetAssetIds(v []*string) *AllocateHostsInput { + s.AssetIds = v + return s +} + // SetAutoPlacement sets the AutoPlacement field's value. 
func (s *AllocateHostsInput) SetAutoPlacement(v string) *AllocateHostsInput { s.AutoPlacement = &v @@ -57803,15 +58094,11 @@ func (s *AssignedPrivateIpAddress) SetPrivateIpAddress(v string) *AssignedPrivat type AssociateAddressInput struct { _ struct{} `type:"structure"` - // [EC2-VPC] The allocation ID. This is required for EC2-VPC. + // The allocation ID. This is required. AllocationId *string `type:"string"` - // [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic - // IP address that is already associated with an instance or network interface - // to be reassociated with the specified instance or network interface. Otherwise, - // the operation fails. In a VPC in an EC2-VPC-only account, reassociation is - // automatic, therefore you can specify false to ensure the operation fails - // if the Elastic IP address is already associated with another resource. + // Reassociation is automatic, but you can specify false to ensure the operation + // fails if the Elastic IP address is already associated with another resource. AllowReassociation *bool `locationName:"allowReassociation" type:"boolean"` // Checks whether you have the required permissions for the action, without @@ -57821,25 +58108,23 @@ type AssociateAddressInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // The ID of the instance. The instance must have exactly one attached network - // interface. For EC2-VPC, you can specify either the instance ID or the network - // interface ID, but not both. For EC2-Classic, you must specify an instance - // ID and the instance must be in the running state. + // interface. You can specify either the instance ID or the network interface + // ID, but not both. InstanceId *string `type:"string"` - // [EC2-VPC] The ID of the network interface. If the instance has more than - // one network interface, you must specify a network interface ID. + // The ID of the network interface. If the instance has more than one network + // interface, you must specify a network interface ID. // - // For EC2-VPC, you can specify either the instance ID or the network interface - // ID, but not both. + // You can specify either the instance ID or the network interface ID, but not + // both. NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` - // [EC2-VPC] The primary or secondary private IP address to associate with the - // Elastic IP address. If no private IP address is specified, the Elastic IP - // address is associated with the primary private IP address. + // The primary or secondary private IP address to associate with the Elastic + // IP address. If no private IP address is specified, the Elastic IP address + // is associated with the primary private IP address. PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` - // [EC2-Classic] The Elastic IP address to associate with the instance. This - // is required for EC2-Classic. + // Deprecated. PublicIp *string `type:"string"` } @@ -57906,8 +58191,8 @@ func (s *AssociateAddressInput) SetPublicIp(v string) *AssociateAddressInput { type AssociateAddressOutput struct { _ struct{} `type:"structure"` - // [EC2-VPC] The ID that represents the association of the Elastic IP address - // with an instance. + // The ID that represents the association of the Elastic IP address with an + // instance. AssociationId *string `locationName:"associationId" type:"string"` } @@ -60191,12 +60476,12 @@ type AttachVerifiedAccessTrustProviderInput struct { // it is UnauthorizedOperation. 
DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. // // VerifiedAccessInstanceId is a required field VerifiedAccessInstanceId *string `type:"string" required:"true"` - // The ID of the Amazon Web Services Verified Access trust provider. + // The ID of the Verified Access trust provider. // // VerifiedAccessTrustProviderId is a required field VerifiedAccessTrustProviderId *string `type:"string" required:"true"` @@ -60263,10 +60548,10 @@ func (s *AttachVerifiedAccessTrustProviderInput) SetVerifiedAccessTrustProviderI type AttachVerifiedAccessTrustProviderOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"` - // The ID of the Amazon Web Services Verified Access trust provider. + // The ID of the Verified Access trust provider. VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"` } @@ -62959,7 +63244,7 @@ type CancelSpotInstanceRequestsInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // One or more Spot Instance request IDs. + // The IDs of the Spot Instance requests. // // SpotInstanceRequestIds is a required field SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list" required:"true"` @@ -63012,7 +63297,7 @@ func (s *CancelSpotInstanceRequestsInput) SetSpotInstanceRequestIds(v []*string) type CancelSpotInstanceRequestsOutput struct { _ struct{} `type:"structure"` - // One or more Spot Instance requests. + // The Spot Instance requests. CancelledSpotInstanceRequests []*CancelledSpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` } @@ -66781,6 +67066,10 @@ func (s *CopySnapshotOutput) SetTags(v []*Tag) *CopySnapshotOutput { type CpuOptions struct { _ struct{} `type:"structure"` + // Indicates whether the instance is enabled for AMD SEV-SNP. For more information, + // see AMD SEV-SNP (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html). + AmdSevSnp *string `locationName:"amdSevSnp" type:"string" enum:"AmdSevSnpSpecification"` + // The number of CPU cores for the instance. CoreCount *int64 `locationName:"coreCount" type:"integer"` @@ -66806,6 +67095,12 @@ func (s CpuOptions) GoString() string { return s.String() } +// SetAmdSevSnp sets the AmdSevSnp field's value. +func (s *CpuOptions) SetAmdSevSnp(v string) *CpuOptions { + s.AmdSevSnp = &v + return s +} + // SetCoreCount sets the CoreCount field's value. func (s *CpuOptions) SetCoreCount(v int64) *CpuOptions { s.CoreCount = &v @@ -66823,6 +67118,11 @@ func (s *CpuOptions) SetThreadsPerCore(v int64) *CpuOptions { type CpuOptionsRequest struct { _ struct{} `type:"structure"` + // Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is + // supported with M6a, R6a, and C6a instance types only. For more information, + // see AMD SEV-SNP (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html). + AmdSevSnp *string `type:"string" enum:"AmdSevSnpSpecification"` + // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` @@ -66849,6 +67149,12 @@ func (s CpuOptionsRequest) GoString() string { return s.String() } +// SetAmdSevSnp sets the AmdSevSnp field's value. 
+func (s *CpuOptionsRequest) SetAmdSevSnp(v string) *CpuOptionsRequest { + s.AmdSevSnp = &v + return s +} + // SetCoreCount sets the CoreCount field's value. func (s *CpuOptionsRequest) SetCoreCount(v int64) *CpuOptionsRequest { s.CoreCount = &v @@ -69718,6 +70024,152 @@ func (s *CreateImageOutput) SetImageId(v string) *CreateImageOutput { return s } +type CreateInstanceConnectEndpointInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Indicates whether your client's IP address is preserved as the source. The + // value is true or false. + // + // * If true, your client's IP address is used when you connect to a resource. + // + // * If false, the elastic network interface IP address is used when you + // connect to a resource. + // + // Default: true + PreserveClientIp *bool `type:"boolean"` + + // One or more security groups to associate with the endpoint. If you don't + // specify a security group, the default security group for your VPC will be + // associated with the endpoint. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the subnet in which to create the EC2 Instance Connect Endpoint. + // + // SubnetId is a required field + SubnetId *string `type:"string" required:"true"` + + // The tags to apply to the EC2 Instance Connect Endpoint during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateInstanceConnectEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateInstanceConnectEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInstanceConnectEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInstanceConnectEndpointInput"} + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateInstanceConnectEndpointInput) SetClientToken(v string) *CreateInstanceConnectEndpointInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateInstanceConnectEndpointInput) SetDryRun(v bool) *CreateInstanceConnectEndpointInput { + s.DryRun = &v + return s +} + +// SetPreserveClientIp sets the PreserveClientIp field's value. 
+func (s *CreateInstanceConnectEndpointInput) SetPreserveClientIp(v bool) *CreateInstanceConnectEndpointInput { + s.PreserveClientIp = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateInstanceConnectEndpointInput) SetSecurityGroupIds(v []*string) *CreateInstanceConnectEndpointInput { + s.SecurityGroupIds = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *CreateInstanceConnectEndpointInput) SetSubnetId(v string) *CreateInstanceConnectEndpointInput { + s.SubnetId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateInstanceConnectEndpointInput) SetTagSpecifications(v []*TagSpecification) *CreateInstanceConnectEndpointInput { + s.TagSpecifications = v + return s +} + +type CreateInstanceConnectEndpointOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive idempotency token provided by the client in the the + // request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the EC2 Instance Connect Endpoint. + InstanceConnectEndpoint *Ec2InstanceConnectEndpoint `locationName:"instanceConnectEndpoint" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateInstanceConnectEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateInstanceConnectEndpointOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateInstanceConnectEndpointOutput) SetClientToken(v string) *CreateInstanceConnectEndpointOutput { + s.ClientToken = &v + return s +} + +// SetInstanceConnectEndpoint sets the InstanceConnectEndpoint field's value. +func (s *CreateInstanceConnectEndpointOutput) SetInstanceConnectEndpoint(v *Ec2InstanceConnectEndpoint) *CreateInstanceConnectEndpointOutput { + s.InstanceConnectEndpoint = v + return s +} + type CreateInstanceEventWindowInput struct { _ struct{} `type:"structure"` @@ -70825,12 +71277,8 @@ type CreateLaunchTemplateInput struct { // The information for the launch template. // - // LaunchTemplateData is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CreateLaunchTemplateInput's - // String and GoString methods. - // // LaunchTemplateData is a required field - LaunchTemplateData *RequestLaunchTemplateData `type:"structure" required:"true" sensitive:"true"` + LaunchTemplateData *RequestLaunchTemplateData `type:"structure" required:"true"` // A name for the launch template. // @@ -70987,12 +71435,8 @@ type CreateLaunchTemplateVersionInput struct { // The information for the launch template. // - // LaunchTemplateData is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CreateLaunchTemplateVersionInput's - // String and GoString methods. 
- // // LaunchTemplateData is a required field - LaunchTemplateData *RequestLaunchTemplateData `type:"structure" required:"true" sensitive:"true"` + LaunchTemplateData *RequestLaunchTemplateData `type:"structure" required:"true"` // The ID of the launch template. // @@ -72563,7 +73007,7 @@ type CreateNetworkInterfaceInput struct { // The type of network interface. The default is interface. // - // The only supported values are efa and trunk. + // The only supported values are interface, efa, and trunk. InterfaceType *string `type:"string" enum:"NetworkInterfaceCreationType"` // The number of IPv4 prefixes that Amazon Web Services automatically assigns @@ -77032,7 +77476,8 @@ func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetIpv6Support(v strin return s } -// Options for a network interface-type endpoint. +// Describes the network interface options when creating an Amazon Web Services +// Verified Access endpoint using the network-interface type. type CreateVerifiedAccessEndpointEniOptions struct { _ struct{} `type:"structure"` @@ -77103,7 +77548,7 @@ type CreateVerifiedAccessEndpointInput struct { // ApplicationDomain is a required field ApplicationDomain *string `type:"string" required:"true"` - // The Amazon Web Services network component Verified Access attaches to. + // The type of attachment. // // AttachmentType is a required field AttachmentType *string `type:"string" required:"true" enum:"VerifiedAccessEndpointAttachmentType"` @@ -77113,7 +77558,7 @@ type CreateVerifiedAccessEndpointInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` - // A description for the Amazon Web Services Verified Access endpoint. + // A description for the Verified Access endpoint. Description *string `type:"string"` // The ARN of the public TLS/SSL certificate in Amazon Web Services Certificate @@ -77129,33 +77574,32 @@ type CreateVerifiedAccessEndpointInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // A custom identifier that gets prepended to a DNS name that is generated for + // A custom identifier that is prepended to the DNS name that is generated for // the endpoint. // // EndpointDomainPrefix is a required field EndpointDomainPrefix *string `type:"string" required:"true"` - // The type of Amazon Web Services Verified Access endpoint to create. + // The type of Verified Access endpoint to create. // // EndpointType is a required field EndpointType *string `type:"string" required:"true" enum:"VerifiedAccessEndpointType"` - // The load balancer details if creating the Amazon Web Services Verified Access - // endpoint as load-balancertype. + // The load balancer details. This parameter is required if the endpoint type + // is load-balancer. LoadBalancerOptions *CreateVerifiedAccessEndpointLoadBalancerOptions `type:"structure"` - // The network interface details if creating the Amazon Web Services Verified - // Access endpoint as network-interfacetype. + // The network interface details. This parameter is required if the endpoint + // type is network-interface. NetworkInterfaceOptions *CreateVerifiedAccessEndpointEniOptions `type:"structure"` - // The Amazon Web Services Verified Access policy document. + // The Verified Access policy document. PolicyDocument *string `type:"string"` - // The Amazon EC2 security groups to associate with the Amazon Web Services - // Verified Access endpoint. 
+ // The IDs of the security groups to associate with the Verified Access endpoint. SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` - // The tags to assign to the Amazon Web Services Verified Access endpoint. + // The tags to assign to the Verified Access endpoint. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The ID of the Verified Access group to associate the endpoint with. @@ -77304,8 +77748,8 @@ func (s *CreateVerifiedAccessEndpointInput) SetVerifiedAccessGroupId(v string) * return s } -// Describes a load balancer when creating an Amazon Web Services Verified Access -// endpoint using the load-balancer type. +// Describes the load balancer options when creating an Amazon Web Services +// Verified Access endpoint using the load-balancer type. type CreateVerifiedAccessEndpointLoadBalancerOptions struct { _ struct{} `type:"structure"` @@ -77380,7 +77824,7 @@ func (s *CreateVerifiedAccessEndpointLoadBalancerOptions) SetSubnetIds(v []*stri type CreateVerifiedAccessEndpointOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access endpoint. + // The ID of the Verified Access endpoint. VerifiedAccessEndpoint *VerifiedAccessEndpoint `locationName:"verifiedAccessEndpoint" type:"structure"` } @@ -77416,7 +77860,7 @@ type CreateVerifiedAccessGroupInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` - // A description for the Amazon Web Services Verified Access group. + // A description for the Verified Access group. Description *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -77425,13 +77869,13 @@ type CreateVerifiedAccessGroupInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The Amazon Web Services Verified Access policy document. + // The Verified Access policy document. PolicyDocument *string `type:"string"` - // The tags to assign to the Amazon Web Services Verified Access group. + // The tags to assign to the Verified Access group. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. // // VerifiedAccessInstanceId is a required field VerifiedAccessInstanceId *string `type:"string" required:"true"` @@ -77543,7 +77987,7 @@ type CreateVerifiedAccessInstanceInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` - // A description for the Amazon Web Services Verified Access instance. + // A description for the Verified Access instance. Description *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -77552,7 +77996,7 @@ type CreateVerifiedAccessInstanceInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The tags to assign to the Amazon Web Services Verified Access instance. + // The tags to assign to the Verified Access instance. 
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } @@ -77601,7 +78045,7 @@ func (s *CreateVerifiedAccessInstanceInput) SetTagSpecifications(v []*TagSpecifi type CreateVerifiedAccessInstanceOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"` } @@ -77629,7 +78073,8 @@ func (s *CreateVerifiedAccessInstanceOutput) SetVerifiedAccessInstance(v *Verifi return s } -// Options for a device-identity type trust provider. +// Describes the options when creating an Amazon Web Services Verified Access +// trust provider using the device type. type CreateVerifiedAccessTrustProviderDeviceOptions struct { _ struct{} `type:"structure"` @@ -77669,13 +78114,15 @@ type CreateVerifiedAccessTrustProviderInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` - // A description for the Amazon Web Services Verified Access trust provider. + // A description for the Verified Access trust provider. Description *string `type:"string"` - // The options for device identity based trust providers. + // The options for a device-based trust provider. This parameter is required + // when the provider type is device. DeviceOptions *CreateVerifiedAccessTrustProviderDeviceOptions `type:"structure"` - // The type of device-based trust provider. + // The type of device-based trust provider. This parameter is required when + // the provider type is device. DeviceTrustProviderType *string `type:"string" enum:"DeviceTrustProviderType"` // Checks whether you have the required permissions for the action, without @@ -77684,7 +78131,8 @@ type CreateVerifiedAccessTrustProviderInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The OpenID Connect details for an oidc-type, user-identity based trust provider. + // The options for an OpenID Connect-compatible user-identity trust provider. + // This parameter is required when the provider type is user. OidcOptions *CreateVerifiedAccessTrustProviderOidcOptions `type:"structure"` // The identifier to be used when working with policy rules. @@ -77692,15 +78140,16 @@ type CreateVerifiedAccessTrustProviderInput struct { // PolicyReferenceName is a required field PolicyReferenceName *string `type:"string" required:"true"` - // The tags to assign to the Amazon Web Services Verified Access trust provider. + // The tags to assign to the Verified Access trust provider. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` - // The type of trust provider can be either user or device-based. + // The type of trust provider. // // TrustProviderType is a required field TrustProviderType *string `type:"string" required:"true" enum:"TrustProviderType"` - // The type of user-based trust provider. + // The type of user-based trust provider. This parameter is required when the + // provider type is user. UserTrustProviderType *string `type:"string" enum:"UserTrustProviderType"` } @@ -77798,7 +78247,8 @@ func (s *CreateVerifiedAccessTrustProviderInput) SetUserTrustProviderType(v stri return s } -// Options for an OIDC-based, user-identity type trust provider. 
+// Describes the options when creating an Amazon Web Services Verified Access +// trust provider using the user type. type CreateVerifiedAccessTrustProviderOidcOptions struct { _ struct{} `type:"structure"` @@ -77809,7 +78259,11 @@ type CreateVerifiedAccessTrustProviderOidcOptions struct { ClientId *string `type:"string"` // The client secret. - ClientSecret *string `type:"string"` + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateVerifiedAccessTrustProviderOidcOptions's + // String and GoString methods. + ClientSecret *string `type:"string" sensitive:"true"` // The OIDC issuer. Issuer *string `type:"string"` @@ -77889,7 +78343,7 @@ func (s *CreateVerifiedAccessTrustProviderOidcOptions) SetUserInfoEndpoint(v str type CreateVerifiedAccessTrustProviderOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access trust provider. + // The ID of the Verified Access trust provider. VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"` } @@ -80942,6 +81396,95 @@ func (s *DeleteFpgaImageOutput) SetReturn(v bool) *DeleteFpgaImageOutput { return s } +type DeleteInstanceConnectEndpointInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the EC2 Instance Connect Endpoint to delete. + // + // InstanceConnectEndpointId is a required field + InstanceConnectEndpointId *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteInstanceConnectEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteInstanceConnectEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInstanceConnectEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceConnectEndpointInput"} + if s.InstanceConnectEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceConnectEndpointId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteInstanceConnectEndpointInput) SetDryRun(v bool) *DeleteInstanceConnectEndpointInput { + s.DryRun = &v + return s +} + +// SetInstanceConnectEndpointId sets the InstanceConnectEndpointId field's value. +func (s *DeleteInstanceConnectEndpointInput) SetInstanceConnectEndpointId(v string) *DeleteInstanceConnectEndpointInput { + s.InstanceConnectEndpointId = &v + return s +} + +type DeleteInstanceConnectEndpointOutput struct { + _ struct{} `type:"structure"` + + // Information about the EC2 Instance Connect Endpoint. 
+ InstanceConnectEndpoint *Ec2InstanceConnectEndpoint `locationName:"instanceConnectEndpoint" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteInstanceConnectEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteInstanceConnectEndpointOutput) GoString() string { + return s.String() +} + +// SetInstanceConnectEndpoint sets the InstanceConnectEndpoint field's value. +func (s *DeleteInstanceConnectEndpointOutput) SetInstanceConnectEndpoint(v *Ec2InstanceConnectEndpoint) *DeleteInstanceConnectEndpointOutput { + s.InstanceConnectEndpoint = v + return s +} + type DeleteInstanceEventWindowInput struct { _ struct{} `type:"structure"` @@ -85553,7 +86096,7 @@ type DeleteVerifiedAccessEndpointInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access endpoint. + // The ID of the Verified Access endpoint. // // VerifiedAccessEndpointId is a required field VerifiedAccessEndpointId *string `type:"string" required:"true"` @@ -85611,7 +86154,7 @@ func (s *DeleteVerifiedAccessEndpointInput) SetVerifiedAccessEndpointId(v string type DeleteVerifiedAccessEndpointOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access endpoint. + // The ID of the Verified Access endpoint. VerifiedAccessEndpoint *VerifiedAccessEndpoint `locationName:"verifiedAccessEndpoint" type:"structure"` } @@ -85653,7 +86196,7 @@ type DeleteVerifiedAccessGroupInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access group. + // The ID of the Verified Access group. // // VerifiedAccessGroupId is a required field VerifiedAccessGroupId *string `type:"string" required:"true"` @@ -85711,7 +86254,7 @@ func (s *DeleteVerifiedAccessGroupInput) SetVerifiedAccessGroupId(v string) *Del type DeleteVerifiedAccessGroupOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access group. + // The ID of the Verified Access group. VerifiedAccessGroup *VerifiedAccessGroup `locationName:"verifiedAccessGroup" type:"structure"` } @@ -85753,7 +86296,7 @@ type DeleteVerifiedAccessInstanceInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. // // VerifiedAccessInstanceId is a required field VerifiedAccessInstanceId *string `type:"string" required:"true"` @@ -85811,7 +86354,7 @@ func (s *DeleteVerifiedAccessInstanceInput) SetVerifiedAccessInstanceId(v string type DeleteVerifiedAccessInstanceOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"` } @@ -85853,7 +86396,7 @@ type DeleteVerifiedAccessTrustProviderInput struct { // it is UnauthorizedOperation. 
DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access trust provider. + // The ID of the Verified Access trust provider. // // VerifiedAccessTrustProviderId is a required field VerifiedAccessTrustProviderId *string `type:"string" required:"true"` @@ -85911,7 +86454,7 @@ func (s *DeleteVerifiedAccessTrustProviderInput) SetVerifiedAccessTrustProviderI type DeleteVerifiedAccessTrustProviderOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access trust provider. + // The ID of the Verified Access trust provider. VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"` } @@ -87094,7 +87637,9 @@ type DeregisterInstanceEventNotificationAttributesInput struct { DryRun *bool `type:"boolean"` // Information about the tag keys to deregister. - InstanceTagAttribute *DeregisterInstanceTagAttributeRequest `type:"structure"` + // + // InstanceTagAttribute is a required field + InstanceTagAttribute *DeregisterInstanceTagAttributeRequest `type:"structure" required:"true"` } // String returns the string representation. @@ -87115,6 +87660,19 @@ func (s DeregisterInstanceEventNotificationAttributesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterInstanceEventNotificationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterInstanceEventNotificationAttributesInput"} + if s.InstanceTagAttribute == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceTagAttribute")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *DeregisterInstanceEventNotificationAttributesInput) SetDryRun(v bool) *DeregisterInstanceEventNotificationAttributesInput { s.DryRun = &v @@ -87705,7 +88263,7 @@ func (s *DescribeAddressesAttributeOutput) SetNextToken(v string) *DescribeAddre type DescribeAddressesInput struct { _ struct{} `type:"structure"` - // [EC2-VPC] Information about the allocation IDs. + // Information about the allocation IDs. AllocationIds []*string `locationName:"AllocationId" locationNameList:"AllocationId" type:"list"` // Checks whether you have the required permissions for the action, without @@ -87716,12 +88274,9 @@ type DescribeAddressesInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * allocation-id - [EC2-VPC] The allocation ID for the address. - // - // * association-id - [EC2-VPC] The association ID for the address. + // * allocation-id - The allocation ID for the address. // - // * domain - Indicates whether the address is for use in EC2-Classic (standard) - // or in a VPC (vpc). + // * association-id - The association ID for the address. // // * instance-id - The ID of the instance the address is associated with, // if any. @@ -87729,14 +88284,14 @@ type DescribeAddressesInput struct { // * network-border-group - A unique set of Availability Zones, Local Zones, // or Wavelength Zones from where Amazon Web Services advertises IP addresses. // - // * network-interface-id - [EC2-VPC] The ID of the network interface that - // the address is associated with, if any. + // * network-interface-id - The ID of the network interface that the address + // is associated with, if any. // // * network-interface-owner-id - The Amazon Web Services account ID of the // owner. 
// - // * private-ip-address - [EC2-VPC] The private IP address associated with - // the Elastic IP address. + // * private-ip-address - The private IP address associated with the Elastic + // IP address. // // * public-ip - The Elastic IP address, or the carrier IP address. // @@ -93722,6 +94277,159 @@ func (s *DescribeInstanceAttributeOutput) SetUserData(v *AttributeValue) *Descri return s } +type DescribeInstanceConnectEndpointsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * instance-connect-endpoint-id - The ID of the EC2 Instance Connect Endpoint. + // + // * state - The state of the EC2 Instance Connect Endpoint (create-in-progress + // | create-complete | create-failed | delete-in-progress | delete-complete + // | delete-failed). + // + // * subnet-id - The ID of the subnet in which the EC2 Instance Connect Endpoint + // was created. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * tag-value - The value of a tag assigned to the resource. Use this filter + // to find all resources that have a tag with a specific value, regardless + // of tag key. + // + // * vpc-id - The ID of the VPC in which the EC2 Instance Connect Endpoint + // was created. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more EC2 Instance Connect Endpoint IDs. + InstanceConnectEndpointIds []*string `locationName:"InstanceConnectEndpointId" locationNameList:"item" type:"list"` + + // The maximum number of items to return for this request. To get the next page + // of items, make another request with the token returned in the output. For + // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination). + MaxResults *int64 `min:"1" type:"integer"` + + // The token returned from a previous paginated request. Pagination continues + // from the end of the items returned by the previous request. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeInstanceConnectEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeInstanceConnectEndpointsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeInstanceConnectEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceConnectEndpointsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeInstanceConnectEndpointsInput) SetDryRun(v bool) *DescribeInstanceConnectEndpointsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstanceConnectEndpointsInput) SetFilters(v []*Filter) *DescribeInstanceConnectEndpointsInput { + s.Filters = v + return s +} + +// SetInstanceConnectEndpointIds sets the InstanceConnectEndpointIds field's value. +func (s *DescribeInstanceConnectEndpointsInput) SetInstanceConnectEndpointIds(v []*string) *DescribeInstanceConnectEndpointsInput { + s.InstanceConnectEndpointIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInstanceConnectEndpointsInput) SetMaxResults(v int64) *DescribeInstanceConnectEndpointsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceConnectEndpointsInput) SetNextToken(v string) *DescribeInstanceConnectEndpointsInput { + s.NextToken = &v + return s +} + +type DescribeInstanceConnectEndpointsOutput struct { + _ struct{} `type:"structure"` + + // Information about the EC2 Instance Connect Endpoints. + InstanceConnectEndpoints []*Ec2InstanceConnectEndpoint `locationName:"instanceConnectEndpointSet" locationNameList:"item" type:"list"` + + // The token to include in another request to get the next page of items. This + // value is null when there are no more items to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeInstanceConnectEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeInstanceConnectEndpointsOutput) GoString() string { + return s.String() +} + +// SetInstanceConnectEndpoints sets the InstanceConnectEndpoints field's value. +func (s *DescribeInstanceConnectEndpointsOutput) SetInstanceConnectEndpoints(v []*Ec2InstanceConnectEndpoint) *DescribeInstanceConnectEndpointsOutput { + s.InstanceConnectEndpoints = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceConnectEndpointsOutput) SetNextToken(v string) *DescribeInstanceConnectEndpointsOutput { + s.NextToken = &v + return s +} + type DescribeInstanceCreditSpecificationsInput struct { _ struct{} `type:"structure"` @@ -94400,8 +95108,8 @@ type DescribeInstanceTypesInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * auto-recovery-supported - Indicates whether auto recovery is supported - // (true | false). + // * auto-recovery-supported - Indicates whether Amazon CloudWatch action + // based recovery is supported (true | false). 
// // * bare-metal - Indicates whether it is a bare metal instance type (true // | false). @@ -94509,6 +95217,8 @@ type DescribeInstanceTypesInput struct { // * processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in // GHz. // + // * processor-info.supported-features - The supported CPU features (amd-sev-snp). + // // * supported-boot-mode - The boot mode (legacy-bios | uefi). // // * supported-root-device-type - The root device type (ebs | instance-store). @@ -94613,6 +95323,12 @@ type DescribeInstanceTypesOutput struct { // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) // in the Amazon EC2 User Guide. + // + // When you change your EBS-backed instance type, instance restart or replacement + // behavior depends on the instance type compatibility between the old and new + // types. An instance that's backed by an instance store volume is always replaced. + // For more information, see Change the instance type (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-resize.html) + // in the Amazon EC2 User Guide. InstanceTypes []*InstanceTypeInfo `locationName:"instanceTypeSet" locationNameList:"item" type:"list"` // The token to include in another request to get the next page of items. This @@ -94690,12 +95406,6 @@ type DescribeInstancesInput struct { // // * dns-name - The public DNS name of the instance. // - // * group-id - The ID of the security group for the instance. EC2-Classic - // only. - // - // * group-name - The name of the security group for the instance. EC2-Classic - // only. - // // * hibernation-options.configured - A Boolean that indicates whether the // instance is enabled for hibernation. A value of true means that the instance // is enabled for hibernation. @@ -99700,16 +100410,11 @@ type DescribeReservedInstancesInput struct { // // * scope - The scope of the Reserved Instance (Region or Availability Zone). // - // * product-description - The Reserved Instance product platform description. - // Instances that include (Amazon VPC) in the product platform description - // will only be displayed to EC2-Classic account holders and are for use - // with Amazon VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE - // Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux - // (Amazon VPC) | Red Hat Enterprise Linux with HA (Amazon VPC) | Windows - // | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with - // SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows - // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise - // | Windows with SQL Server Enterprise (Amazon VPC)). + // * product-description - The Reserved Instance product platform description + // (Linux/UNIX | Linux with SQL Server Standard | Linux with SQL Server Web + // | Linux with SQL Server Enterprise | SUSE Linux | Red Hat Enterprise Linux + // | Red Hat Enterprise Linux with HA | Windows | Windows with SQL Server + // Standard | Windows with SQL Server Web | Windows with SQL Server Enterprise). // // * reserved-instances-id - The ID of the Reserved Instance. // @@ -99911,9 +100616,6 @@ type DescribeReservedInstancesModificationsInput struct { // * modification-result.target-configuration.instance-type - The instance // type of the new Reserved Instances. // - // * modification-result.target-configuration.platform - The network platform - // of the new Reserved Instances (EC2-Classic | EC2-VPC). 
- // // * reserved-instances-id - The ID of the Reserved Instances modified. // // * reserved-instances-modification-id - The ID of the modification request. @@ -100042,16 +100744,11 @@ type DescribeReservedInstancesOfferingsInput struct { // all offerings from both Amazon Web Services and the Reserved Instance // Marketplace are listed. // - // * product-description - The Reserved Instance product platform description. - // Instances that include (Amazon VPC) in the product platform description - // will only be displayed to EC2-Classic account holders and are for use - // with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | - // SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise - // Linux (Amazon VPC) | Red Hat Enterprise Linux with HA (Amazon VPC) | Windows - // | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with - // SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows - // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise - // | Windows with SQL Server Enterprise (Amazon VPC)) + // * product-description - The Reserved Instance product platform description + // (Linux/UNIX | Linux with SQL Server Standard | Linux with SQL Server Web + // | Linux with SQL Server Enterprise | SUSE Linux | Red Hat Enterprise Linux + // | Red Hat Enterprise Linux with HA | Windows | Windows with SQL Server + // Standard | Windows with SQL Server Web | Windows with SQL Server Enterprise). // // * reserved-instances-offering-id - The Reserved Instances offering ID. // @@ -100512,8 +101209,6 @@ type DescribeScheduledInstanceAvailabilityInput struct { // // * instance-type - The instance type (for example, c4.large). // - // * network-platform - The network platform (EC2-Classic or EC2-VPC). - // // * platform - The platform (Linux/UNIX or Windows). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -100694,8 +101389,6 @@ type DescribeScheduledInstancesInput struct { // // * instance-type - The instance type (for example, c4.large). // - // * network-platform - The network platform (EC2-Classic or EC2-VPC). - // // * platform - The platform (Linux/UNIX or Windows). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -102167,7 +102860,7 @@ type DescribeSpotInstanceRequestsInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // One or more filters. + // The filters. // // * availability-zone-group - The Availability Zone group. // @@ -102284,7 +102977,7 @@ type DescribeSpotInstanceRequestsInput struct { // from the end of the items returned by the previous request. NextToken *string `type:"string"` - // One or more Spot Instance request IDs. + // The IDs of the Spot Instance requests. SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list"` } @@ -102344,7 +103037,7 @@ type DescribeSpotInstanceRequestsOutput struct { // value is null when there are no more items to return. NextToken *string `locationName:"nextToken" type:"string"` - // One or more Spot Instance requests. + // The Spot Instance requests. SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` } @@ -102395,7 +103088,7 @@ type DescribeSpotPriceHistoryInput struct { // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). 
EndTime *time.Time `locationName:"endTime" type:"timestamp"` - // One or more filters. + // The filters. // // * availability-zone - The Availability Zone for which prices should be // returned. @@ -105087,13 +105780,13 @@ type DescribeVerifiedAccessEndpointsInput struct { // The token for the next page of results. NextToken *string `type:"string"` - // The ID of the Amazon Web Services Verified Access endpoint. + // The ID of the Verified Access endpoint. VerifiedAccessEndpointIds []*string `locationName:"VerifiedAccessEndpointId" locationNameList:"item" type:"list"` - // The ID of the Amazon Web Services Verified Access group. + // The ID of the Verified Access group. VerifiedAccessGroupId *string `type:"string"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. VerifiedAccessInstanceId *string `type:"string"` } @@ -105177,7 +105870,7 @@ type DescribeVerifiedAccessEndpointsOutput struct { // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` - // The ID of the Amazon Web Services Verified Access endpoint. + // The ID of the Verified Access endpoint. VerifiedAccessEndpoints []*VerifiedAccessEndpoint `locationName:"verifiedAccessEndpointSet" locationNameList:"item" type:"list"` } @@ -105230,10 +105923,10 @@ type DescribeVerifiedAccessGroupsInput struct { // The token for the next page of results. NextToken *string `type:"string"` - // The ID of the Amazon Web Services Verified Access groups. + // The ID of the Verified Access groups. VerifiedAccessGroupIds []*string `locationName:"VerifiedAccessGroupId" locationNameList:"item" type:"list"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. VerifiedAccessInstanceId *string `type:"string"` } @@ -105364,7 +106057,7 @@ type DescribeVerifiedAccessInstanceLoggingConfigurationsInput struct { // The token for the next page of results. NextToken *string `type:"string"` - // The IDs of the Amazon Web Services Verified Access instances. + // The IDs of the Verified Access instances. VerifiedAccessInstanceIds []*string `locationName:"VerifiedAccessInstanceId" locationNameList:"item" type:"list"` } @@ -105432,8 +106125,7 @@ func (s *DescribeVerifiedAccessInstanceLoggingConfigurationsInput) SetVerifiedAc type DescribeVerifiedAccessInstanceLoggingConfigurationsOutput struct { _ struct{} `type:"structure"` - // The current logging configuration for the Amazon Web Services Verified Access - // instances. + // The current logging configuration for the Verified Access instances. LoggingConfigurations []*VerifiedAccessInstanceLoggingConfiguration `locationName:"loggingConfigurationSet" locationNameList:"item" type:"list"` // The token to use to retrieve the next page of results. This value is null @@ -105490,7 +106182,7 @@ type DescribeVerifiedAccessInstancesInput struct { // The token for the next page of results. NextToken *string `type:"string"` - // The IDs of the Amazon Web Services Verified Access instances. + // The IDs of the Verified Access instances. VerifiedAccessInstanceIds []*string `locationName:"VerifiedAccessInstanceId" locationNameList:"item" type:"list"` } @@ -105562,7 +106254,7 @@ type DescribeVerifiedAccessInstancesOutput struct { // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` - // The IDs of the Amazon Web Services Verified Access instances. + // The IDs of the Verified Access instances. 
VerifiedAccessInstances []*VerifiedAccessInstance `locationName:"verifiedAccessInstanceSet" locationNameList:"item" type:"list"` } @@ -105615,7 +106307,7 @@ type DescribeVerifiedAccessTrustProvidersInput struct { // The token for the next page of results. NextToken *string `type:"string"` - // The IDs of the Amazon Web Services Verified Access trust providers. + // The IDs of the Verified Access trust providers. VerifiedAccessTrustProviderIds []*string `locationName:"VerifiedAccessTrustProviderId" locationNameList:"item" type:"list"` } @@ -105687,7 +106379,7 @@ type DescribeVerifiedAccessTrustProvidersOutput struct { // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` - // The IDs of the Amazon Web Services Verified Access trust providers. + // The IDs of the Verified Access trust providers. VerifiedAccessTrustProviders []*VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProviderSet" locationNameList:"item" type:"list"` } @@ -108428,12 +109120,12 @@ type DetachVerifiedAccessTrustProviderInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. // // VerifiedAccessInstanceId is a required field VerifiedAccessInstanceId *string `type:"string" required:"true"` - // The ID of the Amazon Web Services Verified Access trust provider. + // The ID of the Verified Access trust provider. // // VerifiedAccessTrustProviderId is a required field VerifiedAccessTrustProviderId *string `type:"string" required:"true"` @@ -108500,10 +109192,10 @@ func (s *DetachVerifiedAccessTrustProviderInput) SetVerifiedAccessTrustProviderI type DetachVerifiedAccessTrustProviderOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"` - // The ID of the Amazon Web Services Verified Access trust provider. + // The ID of the Verified Access trust provider. VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"` } @@ -108724,8 +109416,8 @@ func (s DetachVpnGatewayOutput) GoString() string { return s.String() } -// Options for an Amazon Web Services Verified Access device-identity based -// trust provider. +// Describes the options for an Amazon Web Services Verified Access device-identity +// based trust provider. type DeviceOptions struct { _ struct{} `type:"structure"` @@ -110325,7 +111017,7 @@ func (s *DisableVpcClassicLinkOutput) SetReturn(v bool) *DisableVpcClassicLinkOu type DisassociateAddressInput struct { _ struct{} `type:"structure"` - // [EC2-VPC] The association ID. Required for EC2-VPC. + // The association ID. This parameter is required. AssociationId *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -110334,7 +111026,7 @@ type DisassociateAddressInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + // Deprecated. PublicIp *string `type:"string"` } @@ -111789,7 +112481,11 @@ type DiskImageDescription struct { // // For information about the import manifest referenced by this API action, // see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). 
- ImportManifestUrl *string `locationName:"importManifestUrl" type:"string"` + // + // ImportManifestUrl is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by DiskImageDescription's + // String and GoString methods. + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" sensitive:"true"` // The size of the disk image, in GiB. Size *int64 `locationName:"size" type:"long"` @@ -112620,6 +113316,176 @@ func (s *EbsOptimizedInfo) SetMaximumThroughputInMBps(v float64) *EbsOptimizedIn return s } +// The EC2 Instance Connect Endpoint. +type Ec2InstanceConnectEndpoint struct { + _ struct{} `type:"structure"` + + // The Availability Zone of the EC2 Instance Connect Endpoint. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The date and time that the EC2 Instance Connect Endpoint was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The DNS name of the EC2 Instance Connect Endpoint. + DnsName *string `locationName:"dnsName" type:"string"` + + FipsDnsName *string `locationName:"fipsDnsName" type:"string"` + + // The Amazon Resource Name (ARN) of the EC2 Instance Connect Endpoint. + InstanceConnectEndpointArn *string `locationName:"instanceConnectEndpointArn" min:"1" type:"string"` + + // The ID of the EC2 Instance Connect Endpoint. + InstanceConnectEndpointId *string `locationName:"instanceConnectEndpointId" type:"string"` + + // The ID of the elastic network interface that Amazon EC2 automatically created + // when creating the EC2 Instance Connect Endpoint. + NetworkInterfaceIds []*string `locationName:"networkInterfaceIdSet" locationNameList:"item" type:"list"` + + // The ID of the Amazon Web Services account that created the EC2 Instance Connect + // Endpoint. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Indicates whether your client's IP address is preserved as the source. The + // value is true or false. + // + // * If true, your client's IP address is used when you connect to a resource. + // + // * If false, the elastic network interface IP address is used when you + // connect to a resource. + // + // Default: true + PreserveClientIp *bool `locationName:"preserveClientIp" type:"boolean"` + + // The security groups associated with the endpoint. If you didn't specify a + // security group, the default security group for your VPC is associated with + // the endpoint. + SecurityGroupIds []*string `locationName:"securityGroupIdSet" locationNameList:"item" type:"list"` + + // The current state of the EC2 Instance Connect Endpoint. + State *string `locationName:"state" type:"string" enum:"Ec2InstanceConnectEndpointState"` + + // The message for the current state of the EC2 Instance Connect Endpoint. Can + // include a failure message. + StateMessage *string `locationName:"stateMessage" type:"string"` + + // The ID of the subnet in which the EC2 Instance Connect Endpoint was created. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The tags assigned to the EC2 Instance Connect Endpoint. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC in which the EC2 Instance Connect Endpoint was created. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s Ec2InstanceConnectEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Ec2InstanceConnectEndpoint) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *Ec2InstanceConnectEndpoint) SetAvailabilityZone(v string) *Ec2InstanceConnectEndpoint { + s.AvailabilityZone = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *Ec2InstanceConnectEndpoint) SetCreatedAt(v time.Time) *Ec2InstanceConnectEndpoint { + s.CreatedAt = &v + return s +} + +// SetDnsName sets the DnsName field's value. +func (s *Ec2InstanceConnectEndpoint) SetDnsName(v string) *Ec2InstanceConnectEndpoint { + s.DnsName = &v + return s +} + +// SetFipsDnsName sets the FipsDnsName field's value. +func (s *Ec2InstanceConnectEndpoint) SetFipsDnsName(v string) *Ec2InstanceConnectEndpoint { + s.FipsDnsName = &v + return s +} + +// SetInstanceConnectEndpointArn sets the InstanceConnectEndpointArn field's value. +func (s *Ec2InstanceConnectEndpoint) SetInstanceConnectEndpointArn(v string) *Ec2InstanceConnectEndpoint { + s.InstanceConnectEndpointArn = &v + return s +} + +// SetInstanceConnectEndpointId sets the InstanceConnectEndpointId field's value. +func (s *Ec2InstanceConnectEndpoint) SetInstanceConnectEndpointId(v string) *Ec2InstanceConnectEndpoint { + s.InstanceConnectEndpointId = &v + return s +} + +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value. +func (s *Ec2InstanceConnectEndpoint) SetNetworkInterfaceIds(v []*string) *Ec2InstanceConnectEndpoint { + s.NetworkInterfaceIds = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *Ec2InstanceConnectEndpoint) SetOwnerId(v string) *Ec2InstanceConnectEndpoint { + s.OwnerId = &v + return s +} + +// SetPreserveClientIp sets the PreserveClientIp field's value. +func (s *Ec2InstanceConnectEndpoint) SetPreserveClientIp(v bool) *Ec2InstanceConnectEndpoint { + s.PreserveClientIp = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *Ec2InstanceConnectEndpoint) SetSecurityGroupIds(v []*string) *Ec2InstanceConnectEndpoint { + s.SecurityGroupIds = v + return s +} + +// SetState sets the State field's value. +func (s *Ec2InstanceConnectEndpoint) SetState(v string) *Ec2InstanceConnectEndpoint { + s.State = &v + return s +} + +// SetStateMessage sets the StateMessage field's value. +func (s *Ec2InstanceConnectEndpoint) SetStateMessage(v string) *Ec2InstanceConnectEndpoint { + s.StateMessage = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *Ec2InstanceConnectEndpoint) SetSubnetId(v string) *Ec2InstanceConnectEndpoint { + s.SubnetId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Ec2InstanceConnectEndpoint) SetTags(v []*Tag) *Ec2InstanceConnectEndpoint { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *Ec2InstanceConnectEndpoint) SetVpcId(v string) *Ec2InstanceConnectEndpoint { + s.VpcId = &v + return s +} + // Describes the Elastic Fabric Adapters for the instance type. 
type EfaInfo struct { _ struct{} `type:"structure"` @@ -117610,8 +118476,9 @@ type FleetLaunchTemplateOverrides struct { // The Availability Zone in which to launch the instances. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The ID of the AMI. An AMI is required to launch an instance. The AMI ID must - // be specified here or in the launch template. + // The ID of the AMI. An AMI is required to launch an instance. This parameter + // is only available for fleets of type instant. For fleets of type maintain + // and request, you must specify the AMI ID in the launch template. ImageId *string `locationName:"imageId" type:"string"` // The attributes for the instance types. When you specify instance attributes, @@ -117742,8 +118609,9 @@ type FleetLaunchTemplateOverridesRequest struct { // The Availability Zone in which to launch the instances. AvailabilityZone *string `type:"string"` - // The ID of the AMI. An AMI is required to launch an instance. The AMI ID must - // be specified here or in the launch template. + // The ID of the AMI. An AMI is required to launch an instance. This parameter + // is only available for fleets of type instant. For fleets of type maintain + // and request, you must specify the AMI ID in the launch template. ImageId *string `type:"string"` // The attributes for the instance types. When you specify instance attributes, @@ -123716,7 +124584,7 @@ type GetVerifiedAccessEndpointPolicyInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access endpoint. + // The ID of the Verified Access endpoint. // // VerifiedAccessEndpointId is a required field VerifiedAccessEndpointId *string `type:"string" required:"true"` @@ -123768,7 +124636,7 @@ func (s *GetVerifiedAccessEndpointPolicyInput) SetVerifiedAccessEndpointId(v str type GetVerifiedAccessEndpointPolicyOutput struct { _ struct{} `type:"structure"` - // The Amazon Web Services Verified Access policy document. + // The Verified Access policy document. PolicyDocument *string `locationName:"policyDocument" type:"string"` // The status of the Verified Access policy. @@ -123814,7 +124682,7 @@ type GetVerifiedAccessGroupPolicyInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access group. + // The ID of the Verified Access group. // // VerifiedAccessGroupId is a required field VerifiedAccessGroupId *string `type:"string" required:"true"` @@ -123866,7 +124734,7 @@ func (s *GetVerifiedAccessGroupPolicyInput) SetVerifiedAccessGroupId(v string) * type GetVerifiedAccessGroupPolicyOutput struct { _ struct{} `type:"structure"` - // The Amazon Web Services Verified Access policy document. + // The Verified Access policy document. PolicyDocument *string `locationName:"policyDocument" type:"string"` // The status of the Verified Access policy. @@ -124463,14 +125331,14 @@ func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier { } // Indicates whether your instance is configured for hibernation. This parameter -// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). +// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html). 
// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon EC2 User Guide. type HibernationOptions struct { _ struct{} `type:"structure"` - // If this parameter is set to true, your instance is enabled for hibernation; - // otherwise, it is not enabled for hibernation. + // If true, your instance is enabled for hibernation; otherwise, it is not enabled + // for hibernation. Configured *bool `locationName:"configured" type:"boolean"` } @@ -124499,13 +125367,13 @@ func (s *HibernationOptions) SetConfigured(v bool) *HibernationOptions { } // Indicates whether your instance is configured for hibernation. This parameter -// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). +// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html). // For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon EC2 User Guide. type HibernationOptionsRequest struct { _ struct{} `type:"structure"` - // If you set this parameter to true, your instance is enabled for hibernation. + // Set to true to enable your instance for hibernation. // // Default: false Configured *bool `type:"boolean"` @@ -124657,6 +125525,9 @@ type Host struct { // Dedicated Host supports a single instance type only. AllowsMultipleInstanceTypes *string `locationName:"allowsMultipleInstanceTypes" type:"string" enum:"AllowsMultipleInstanceTypes"` + // The ID of the Outpost hardware asset on which the Dedicated Host is allocated. + AssetId *string `locationName:"assetId" type:"string"` + // Whether auto-placement is on or off. AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` @@ -124745,6 +125616,12 @@ func (s *Host) SetAllowsMultipleInstanceTypes(v string) *Host { return s } +// SetAssetId sets the AssetId field's value. +func (s *Host) SetAssetId(v string) *Host { + s.AssetId = &v + return s +} + // SetAutoPlacement sets the AutoPlacement field's value. func (s *Host) SetAutoPlacement(v string) *Host { s.AutoPlacement = &v @@ -125853,7 +126730,11 @@ type ImageDiskContainer struct { // The URL to the Amazon S3-based disk image being imported. The URL can either // be a https URL (https://..) or an Amazon S3 URL (s3://..) - Url *string `type:"string"` + // + // Url is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ImageDiskContainer's + // String and GoString methods. + Url *string `type:"string" sensitive:"true"` // The S3 bucket for the disk image. UserBucket *UserBucket `type:"structure"` @@ -127913,7 +128794,7 @@ type Instance struct { // The monitoring for the instance. Monitoring *Monitoring `locationName:"monitoring" type:"structure"` - // [EC2-VPC] The network interfaces for the instance. + // The network interfaces for the instance. NetworkInterfaces []*InstanceNetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` // The Amazon Resource Name (ARN) of the Outpost. @@ -127930,14 +128811,14 @@ type Instance struct { // in the Amazon EC2 User Guide. PlatformDetails *string `locationName:"platformDetails" type:"string"` - // (IPv4 only) The private DNS hostname name assigned to the instance. 
This + // [IPv4 only] The private DNS hostname name assigned to the instance. This // DNS hostname can only be used inside the Amazon EC2 network. This name is // not available until the instance enters the running state. // - // [EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private - // DNS hostnames if you've enabled DNS resolution and DNS hostnames in your - // VPC. If you are not using the Amazon-provided DNS server in your VPC, your - // custom domain name servers must resolve the hostname as appropriate. + // The Amazon-provided DNS server resolves Amazon-provided private DNS hostnames + // if you've enabled DNS resolution and DNS hostnames in your VPC. If you are + // not using the Amazon-provided DNS server in your VPC, your custom domain + // name servers must resolve the hostname as appropriate. PrivateDnsName *string `locationName:"privateDnsName" type:"string"` // The options for the instance hostname. @@ -127949,9 +128830,9 @@ type Instance struct { // The product codes attached to this instance, if applicable. ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` - // (IPv4 only) The public DNS name assigned to the instance. This name is not - // available until the instance enters the running state. For EC2-VPC, this - // name is only available if you've enabled DNS hostnames for your VPC. + // [IPv4 only] The public DNS name assigned to the instance. This name is not + // available until the instance enters the running state. This name is only + // available if you've enabled DNS hostnames for your VPC. PublicDnsName *string `locationName:"dnsName" type:"string"` // The public IPv4 address, or the Carrier IP address assigned to the instance, @@ -127993,7 +128874,7 @@ type Instance struct { // The reason for the most recent state transition. This might be an empty string. StateTransitionReason *string `locationName:"reason" type:"string"` - // [EC2-VPC] The ID of the subnet in which the instance is running. + // The ID of the subnet in which the instance is running. SubnetId *string `locationName:"subnetId" type:"string"` // Any tags assigned to the instance. @@ -128015,7 +128896,7 @@ type Instance struct { // The virtualization type of the instance. VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` - // [EC2-VPC] The ID of the VPC in which the instance is running. + // The ID of the VPC in which the instance is running. VpcId *string `locationName:"vpcId" type:"string"` } @@ -131808,7 +132689,7 @@ func (s *InstanceTagNotificationAttribute) SetInstanceTagKeys(v []*string) *Inst type InstanceTypeInfo struct { _ struct{} `type:"structure"` - // Indicates whether auto recovery is supported. + // Indicates whether Amazon CloudWatch action based recovery is supported. AutoRecoverySupported *bool `locationName:"autoRecoverySupported" type:"boolean"` // Indicates whether the instance is a bare metal instance type. @@ -131860,6 +132741,15 @@ type InstanceTypeInfo struct { // Describes the network settings for the instance type. NetworkInfo *NetworkInfo `locationName:"networkInfo" type:"structure"` + // Indicates whether Nitro Enclaves is supported. + NitroEnclavesSupport *string `locationName:"nitroEnclavesSupport" type:"string" enum:"NitroEnclavesSupport"` + + // Describes the supported NitroTPM versions for the instance type. + NitroTpmInfo *NitroTpmInfo `locationName:"nitroTpmInfo" type:"structure"` + + // Indicates whether NitroTPM is supported. 
+ NitroTpmSupport *string `locationName:"nitroTpmSupport" type:"string" enum:"NitroTpmSupport"` + // Describes the placement group settings for the instance type. PlacementGroupInfo *PlacementGroupInfo `locationName:"placementGroupInfo" type:"structure"` @@ -132003,6 +132893,24 @@ func (s *InstanceTypeInfo) SetNetworkInfo(v *NetworkInfo) *InstanceTypeInfo { return s } +// SetNitroEnclavesSupport sets the NitroEnclavesSupport field's value. +func (s *InstanceTypeInfo) SetNitroEnclavesSupport(v string) *InstanceTypeInfo { + s.NitroEnclavesSupport = &v + return s +} + +// SetNitroTpmInfo sets the NitroTpmInfo field's value. +func (s *InstanceTypeInfo) SetNitroTpmInfo(v *NitroTpmInfo) *InstanceTypeInfo { + s.NitroTpmInfo = v + return s +} + +// SetNitroTpmSupport sets the NitroTpmSupport field's value. +func (s *InstanceTypeInfo) SetNitroTpmSupport(v string) *InstanceTypeInfo { + s.NitroTpmSupport = &v + return s +} + // SetPlacementGroupInfo sets the PlacementGroupInfo field's value. func (s *InstanceTypeInfo) SetPlacementGroupInfo(v *PlacementGroupInfo) *InstanceTypeInfo { s.PlacementGroupInfo = v @@ -134884,7 +135792,7 @@ type LaunchSpecification struct { // Deprecated. AddressingType *string `locationName:"addressingType" type:"string"` - // One or more block device mapping entries. + // The block device mapping entries. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // Indicates whether the instance is optimized for EBS I/O. This optimization @@ -134914,8 +135822,8 @@ type LaunchSpecification struct { // Describes the monitoring of an instance. Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` - // One or more network interfaces. If you specify a network interface, you must - // specify subnet IDs and security group IDs using the network interface. + // The network interfaces. If you specify a network interface, you must specify + // subnet IDs and security group IDs using the network interface. NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` // The placement information for the instance. @@ -134924,9 +135832,7 @@ type LaunchSpecification struct { // The ID of the RAM disk. RamdiskId *string `locationName:"ramdiskId" type:"string"` - // One or more security groups. When requesting instances in a VPC, you must - // specify the IDs of the security groups. When requesting instances in EC2-Classic, - // you can specify the names or the IDs of the security groups. + // The IDs of the security groups. SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` // The ID of the subnet in which to launch the instance. @@ -135407,7 +136313,9 @@ func (s *LaunchTemplateCapacityReservationSpecificationResponse) SetCapacityRese type LaunchTemplateConfig struct { _ struct{} `type:"structure"` - // The launch template. + // The launch template to use. Make sure that the launch template does not contain + // the NetworkInterfaceId parameter because you can't specify a network interface + // ID in a Spot Fleet. 
LaunchTemplateSpecification *FleetLaunchTemplateSpecification `locationName:"launchTemplateSpecification" type:"structure"` // Any parameters that you specify override the same parameters in the launch @@ -135464,6 +136372,9 @@ func (s *LaunchTemplateConfig) SetOverrides(v []*LaunchTemplateOverrides) *Launc type LaunchTemplateCpuOptions struct { _ struct{} `type:"structure"` + // Indicates whether the instance is enabled for AMD SEV-SNP. + AmdSevSnp *string `locationName:"amdSevSnp" type:"string" enum:"AmdSevSnpSpecification"` + // The number of CPU cores for the instance. CoreCount *int64 `locationName:"coreCount" type:"integer"` @@ -135489,6 +136400,12 @@ func (s LaunchTemplateCpuOptions) GoString() string { return s.String() } +// SetAmdSevSnp sets the AmdSevSnp field's value. +func (s *LaunchTemplateCpuOptions) SetAmdSevSnp(v string) *LaunchTemplateCpuOptions { + s.AmdSevSnp = &v + return s +} + // SetCoreCount sets the CoreCount field's value. func (s *LaunchTemplateCpuOptions) SetCoreCount(v int64) *LaunchTemplateCpuOptions { s.CoreCount = &v @@ -135506,6 +136423,10 @@ func (s *LaunchTemplateCpuOptions) SetThreadsPerCore(v int64) *LaunchTemplateCpu type LaunchTemplateCpuOptionsRequest struct { _ struct{} `type:"structure"` + // Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is + // supported with M6a, R6a, and C6a instance types only. + AmdSevSnp *string `type:"string" enum:"AmdSevSnpSpecification"` + // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` @@ -135532,6 +136453,12 @@ func (s LaunchTemplateCpuOptionsRequest) GoString() string { return s.String() } +// SetAmdSevSnp sets the AmdSevSnp field's value. +func (s *LaunchTemplateCpuOptionsRequest) SetAmdSevSnp(v string) *LaunchTemplateCpuOptionsRequest { + s.AmdSevSnp = &v + return s +} + // SetCoreCount sets the CoreCount field's value. func (s *LaunchTemplateCpuOptionsRequest) SetCoreCount(v int64) *LaunchTemplateCpuOptionsRequest { s.CoreCount = &v @@ -137075,8 +138002,8 @@ type LaunchTemplatePlacement struct { // Reserved for future use. SpreadDomain *string `locationName:"spreadDomain" type:"string"` - // The tenancy of the instance (if the instance is running in a VPC). An instance - // with a tenancy of dedicated runs on single-tenant hardware. + // The tenancy of the instance. An instance with a tenancy of dedicated runs + // on single-tenant hardware. Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"` } @@ -137184,8 +138111,8 @@ type LaunchTemplatePlacementRequest struct { // Reserved for future use. SpreadDomain *string `type:"string"` - // The tenancy of the instance (if the instance is running in a VPC). An instance - // with a tenancy of dedicated runs on single-tenant hardware. + // The tenancy of the instance. An instance with a tenancy of dedicated runs + // on single-tenant hardware. Tenancy *string `type:"string" enum:"Tenancy"` } @@ -141270,10 +142197,9 @@ type ModifyInstanceAttributeInput struct { // a PV instance can make it unreachable. EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` - // [EC2-VPC] Replaces the security groups of the instance with the specified - // security groups. You must specify at least one security group, even if it's - // just the default security group for the VPC. You must specify the security - // group ID, not the security group name. + // Replaces the security groups of the instance with the specified security + // groups. 
You must specify the ID of at least one security group, even if it's + // just the default security group for the VPC. Groups []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` // The ID of the instance. @@ -142261,7 +143187,8 @@ type ModifyInstancePlacementInput struct { // The ID of the Dedicated Host with which to associate the instance. HostId *string `locationName:"hostId" type:"string"` - // The ARN of the host resource group in which to place the instance. + // The ARN of the host resource group in which to place the instance. The instance + // must have a tenancy of host to specify this parameter. HostResourceGroupArn *string `type:"string"` // The ID of the instance that you are modifying. @@ -142275,9 +143202,10 @@ type ModifyInstancePlacementInput struct { // The tenancy for the instance. // - // For T3 instances, you can't change the tenancy from dedicated to host, or - // from host to dedicated. Attempting to make one of these unsupported tenancy - // changes results in the InvalidTenancy error code. + // For T3 instances, you must launch the instance on a Dedicated Host to use + // a tenancy of host. You can't change the tenancy from host to dedicated or + // default. Attempting to make one of these unsupported tenancy changes results + // in an InvalidRequest error code. Tenancy *string `locationName:"tenancy" type:"string" enum:"HostTenancy"` } @@ -145484,7 +146412,8 @@ func (s *ModifyTransitGatewayVpcAttachmentRequestOptions) SetIpv6Support(v strin return s } -// Options for a network-interface type Verified Access endpoint. +// Describes the options when modifying a Verified Access endpoint with the +// network-interface type. type ModifyVerifiedAccessEndpointEniOptions struct { _ struct{} `type:"structure"` @@ -145546,7 +146475,7 @@ type ModifyVerifiedAccessEndpointInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` - // A description for the Amazon Web Services Verified Access endpoint. + // A description for the Verified Access endpoint. Description *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -145555,19 +146484,18 @@ type ModifyVerifiedAccessEndpointInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The load balancer details if creating the Amazon Web Services Verified Access - // endpoint as load-balancertype. + // The load balancer details if creating the Verified Access endpoint as load-balancertype. LoadBalancerOptions *ModifyVerifiedAccessEndpointLoadBalancerOptions `type:"structure"` // The network interface options. NetworkInterfaceOptions *ModifyVerifiedAccessEndpointEniOptions `type:"structure"` - // The ID of the Amazon Web Services Verified Access endpoint. + // The ID of the Verified Access endpoint. // // VerifiedAccessEndpointId is a required field VerifiedAccessEndpointId *string `type:"string" required:"true"` - // The ID of the Amazon Web Services Verified Access group. + // The ID of the Verified Access group. VerifiedAccessGroupId *string `type:"string"` } @@ -145721,7 +146649,7 @@ func (s *ModifyVerifiedAccessEndpointLoadBalancerOptions) SetSubnetIds(v []*stri type ModifyVerifiedAccessEndpointOutput struct { _ struct{} `type:"structure"` - // The Amazon Web Services Verified Access endpoint details. + // The Verified Access endpoint details. 
VerifiedAccessEndpoint *VerifiedAccessEndpoint `locationName:"verifiedAccessEndpoint" type:"structure"` } @@ -145763,7 +146691,7 @@ type ModifyVerifiedAccessEndpointPolicyInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The Amazon Web Services Verified Access policy document. + // The Verified Access policy document. PolicyDocument *string `type:"string"` // The status of the Verified Access policy. @@ -145771,7 +146699,7 @@ type ModifyVerifiedAccessEndpointPolicyInput struct { // PolicyEnabled is a required field PolicyEnabled *bool `type:"boolean" required:"true"` - // The ID of the Amazon Web Services Verified Access endpoint. + // The ID of the Verified Access endpoint. // // VerifiedAccessEndpointId is a required field VerifiedAccessEndpointId *string `type:"string" required:"true"` @@ -145844,7 +146772,7 @@ func (s *ModifyVerifiedAccessEndpointPolicyInput) SetVerifiedAccessEndpointId(v type ModifyVerifiedAccessEndpointPolicyOutput struct { _ struct{} `type:"structure"` - // The Amazon Web Services Verified Access policy document. + // The Verified Access policy document. PolicyDocument *string `locationName:"policyDocument" type:"string"` // The status of the Verified Access policy. @@ -145889,7 +146817,7 @@ type ModifyVerifiedAccessGroupInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` - // A description for the Amazon Web Services Verified Access group. + // A description for the Verified Access group. Description *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -145898,12 +146826,12 @@ type ModifyVerifiedAccessGroupInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access group. + // The ID of the Verified Access group. // // VerifiedAccessGroupId is a required field VerifiedAccessGroupId *string `type:"string" required:"true"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. VerifiedAccessInstanceId *string `type:"string"` } @@ -145971,7 +146899,7 @@ func (s *ModifyVerifiedAccessGroupInput) SetVerifiedAccessInstanceId(v string) * type ModifyVerifiedAccessGroupOutput struct { _ struct{} `type:"structure"` - // Details of Amazon Web Services Verified Access group. + // Details of Verified Access group. VerifiedAccessGroup *VerifiedAccessGroup `locationName:"verifiedAccessGroup" type:"structure"` } @@ -146013,7 +146941,7 @@ type ModifyVerifiedAccessGroupPolicyInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The Amazon Web Services Verified Access policy document. + // The Verified Access policy document. PolicyDocument *string `type:"string"` // The status of the Verified Access policy. @@ -146021,7 +146949,7 @@ type ModifyVerifiedAccessGroupPolicyInput struct { // PolicyEnabled is a required field PolicyEnabled *bool `type:"boolean" required:"true"` - // The ID of the Amazon Web Services Verified Access group. + // The ID of the Verified Access group. // // VerifiedAccessGroupId is a required field VerifiedAccessGroupId *string `type:"string" required:"true"` @@ -146094,7 +147022,7 @@ func (s *ModifyVerifiedAccessGroupPolicyInput) SetVerifiedAccessGroupId(v string type ModifyVerifiedAccessGroupPolicyOutput struct { _ struct{} `type:"structure"` - // The Amazon Web Services Verified Access policy document. 
+ // The Verified Access policy document. PolicyDocument *string `locationName:"policyDocument" type:"string"` // The status of the Verified Access policy. @@ -146139,7 +147067,7 @@ type ModifyVerifiedAccessInstanceInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` - // A description for the Amazon Web Services Verified Access instance. + // A description for the Verified Access instance. Description *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -146148,7 +147076,7 @@ type ModifyVerifiedAccessInstanceInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. // // VerifiedAccessInstanceId is a required field VerifiedAccessInstanceId *string `type:"string" required:"true"` @@ -146212,7 +147140,7 @@ func (s *ModifyVerifiedAccessInstanceInput) SetVerifiedAccessInstanceId(v string type ModifyVerifiedAccessInstanceLoggingConfigurationInput struct { _ struct{} `type:"structure"` - // The configuration options for Amazon Web Services Verified Access instances. + // The configuration options for Verified Access instances. // // AccessLogs is a required field AccessLogs *VerifiedAccessLogOptions `type:"structure" required:"true"` @@ -146228,7 +147156,7 @@ type ModifyVerifiedAccessInstanceLoggingConfigurationInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. // // VerifiedAccessInstanceId is a required field VerifiedAccessInstanceId *string `type:"string" required:"true"` @@ -146300,7 +147228,7 @@ func (s *ModifyVerifiedAccessInstanceLoggingConfigurationInput) SetVerifiedAcces type ModifyVerifiedAccessInstanceLoggingConfigurationOutput struct { _ struct{} `type:"structure"` - // The logging configuration for Amazon Web Services Verified Access instance. + // The logging configuration for the Verified Access instance. LoggingConfiguration *VerifiedAccessInstanceLoggingConfiguration `locationName:"loggingConfiguration" type:"structure"` } @@ -146331,7 +147259,7 @@ func (s *ModifyVerifiedAccessInstanceLoggingConfigurationOutput) SetLoggingConfi type ModifyVerifiedAccessInstanceOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access instance. + // The ID of the Verified Access instance. VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"` } @@ -146367,7 +147295,7 @@ type ModifyVerifiedAccessTrustProviderInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` - // A description for the Amazon Web Services Verified Access trust provider. + // A description for the Verified Access trust provider. Description *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -146376,10 +147304,10 @@ type ModifyVerifiedAccessTrustProviderInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The OpenID Connect details for an oidc-type, user-identity based trust provider. + // The options for an OpenID Connect-compatible user-identity trust provider. 
OidcOptions *ModifyVerifiedAccessTrustProviderOidcOptions `type:"structure"` - // The ID of the Amazon Web Services Verified Access trust provider. + // The ID of the Verified Access trust provider. // // VerifiedAccessTrustProviderId is a required field VerifiedAccessTrustProviderId *string `type:"string" required:"true"` @@ -146446,14 +147374,36 @@ func (s *ModifyVerifiedAccessTrustProviderInput) SetVerifiedAccessTrustProviderI return s } -// OpenID Connect options for an oidc-type, user-identity based trust provider. +// Options for an OpenID Connect-compatible user-identity trust provider. type ModifyVerifiedAccessTrustProviderOidcOptions struct { _ struct{} `type:"structure"` + // The OIDC authorization endpoint. + AuthorizationEndpoint *string `type:"string"` + + // The client identifier. + ClientId *string `type:"string"` + + // The client secret. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ModifyVerifiedAccessTrustProviderOidcOptions's + // String and GoString methods. + ClientSecret *string `type:"string" sensitive:"true"` + + // The OIDC issuer. + Issuer *string `type:"string"` + // OpenID Connect (OIDC) scopes are used by an application during authentication // to authorize access to a user's details. Each scope returns a specific set // of user attributes. Scope *string `type:"string"` + + // The OIDC token endpoint. + TokenEndpoint *string `type:"string"` + + // The OIDC user info endpoint. + UserInfoEndpoint *string `type:"string"` } // String returns the string representation. @@ -146474,16 +147424,52 @@ func (s ModifyVerifiedAccessTrustProviderOidcOptions) GoString() string { return s.String() } +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *ModifyVerifiedAccessTrustProviderOidcOptions) SetAuthorizationEndpoint(v string) *ModifyVerifiedAccessTrustProviderOidcOptions { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *ModifyVerifiedAccessTrustProviderOidcOptions) SetClientId(v string) *ModifyVerifiedAccessTrustProviderOidcOptions { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *ModifyVerifiedAccessTrustProviderOidcOptions) SetClientSecret(v string) *ModifyVerifiedAccessTrustProviderOidcOptions { + s.ClientSecret = &v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *ModifyVerifiedAccessTrustProviderOidcOptions) SetIssuer(v string) *ModifyVerifiedAccessTrustProviderOidcOptions { + s.Issuer = &v + return s +} + // SetScope sets the Scope field's value. func (s *ModifyVerifiedAccessTrustProviderOidcOptions) SetScope(v string) *ModifyVerifiedAccessTrustProviderOidcOptions { s.Scope = &v return s } +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *ModifyVerifiedAccessTrustProviderOidcOptions) SetTokenEndpoint(v string) *ModifyVerifiedAccessTrustProviderOidcOptions { + s.TokenEndpoint = &v + return s +} + +// SetUserInfoEndpoint sets the UserInfoEndpoint field's value. +func (s *ModifyVerifiedAccessTrustProviderOidcOptions) SetUserInfoEndpoint(v string) *ModifyVerifiedAccessTrustProviderOidcOptions { + s.UserInfoEndpoint = &v + return s +} + type ModifyVerifiedAccessTrustProviderOutput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Web Services Verified Access trust provider. + // The ID of the Verified Access trust provider. 
VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"` } @@ -148172,8 +149158,12 @@ type ModifyVpnTunnelOptionsInput struct { // The tunnel options to modify. // + // TunnelOptions is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ModifyVpnTunnelOptionsInput's + // String and GoString methods. + // // TunnelOptions is a required field - TunnelOptions *ModifyVpnTunnelOptionsSpecification `type:"structure" required:"true"` + TunnelOptions *ModifyVpnTunnelOptionsSpecification `type:"structure" required:"true" sensitive:"true"` // The ID of the Amazon Web Services Site-to-Site VPN connection. // @@ -148286,7 +149276,7 @@ func (s *ModifyVpnTunnelOptionsOutput) SetVpnConnection(v *VpnConnection) *Modif // The Amazon Web Services Site-to-Site VPN tunnel options to modify. type ModifyVpnTunnelOptionsSpecification struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" sensitive:"true"` // The action to take after DPD timeout occurs. Specify restart to restart the // IKE initiation. Specify clear to end the IKE session. @@ -148371,7 +149361,11 @@ type ModifyVpnTunnelOptionsSpecification struct { // Constraints: Allowed characters are alphanumeric characters, periods (.), // and underscores (_). Must be between 8 and 64 characters in length and cannot // start with zero (0). - PreSharedKey *string `type:"string"` + // + // PreSharedKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ModifyVpnTunnelOptionsSpecification's + // String and GoString methods. + PreSharedKey *string `type:"string" sensitive:"true"` // The percentage of the rekey window (determined by RekeyMarginTimeSeconds) // during which the rekey time is randomly selected. @@ -148912,16 +149906,13 @@ func (s *MoveByoipCidrToIpamOutput) SetByoipCidr(v *ByoipCidr) *MoveByoipCidrToI return s } -// Describes the status of a moving Elastic IP address. +// This action is deprecated. // -// We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic -// to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) -// in the Amazon Elastic Compute Cloud User Guide. +// Describes the status of a moving Elastic IP address. type MovingAddressStatus struct { _ struct{} `type:"structure"` - // The status of the Elastic IP address that's being moved to the EC2-VPC platform, - // or restored to the EC2-Classic platform. + // The status of the Elastic IP address that's being moved or restored. MoveStatus *string `locationName:"moveStatus" type:"string" enum:"MoveStatus"` // The Elastic IP address. @@ -151149,7 +152140,40 @@ func (s *NewDhcpConfiguration) SetValues(v []*string) *NewDhcpConfiguration { return s } -// Options for OIDC-based, user-identity type trust provider. +// Describes the supported NitroTPM versions for the instance type. +type NitroTpmInfo struct { + _ struct{} `type:"structure"` + + // Indicates the supported NitroTPM versions. + SupportedVersions []*string `locationName:"supportedVersions" locationNameList:"item" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s NitroTpmInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NitroTpmInfo) GoString() string { + return s.String() +} + +// SetSupportedVersions sets the SupportedVersions field's value. +func (s *NitroTpmInfo) SetSupportedVersions(v []*string) *NitroTpmInfo { + s.SupportedVersions = v + return s +} + +// Describes the options for an OpenID Connect-compatible user-identity trust +// provider. type OidcOptions struct { _ struct{} `type:"structure"` @@ -151160,7 +152184,11 @@ type OidcOptions struct { ClientId *string `locationName:"clientId" type:"string"` // The client secret. - ClientSecret *string `locationName:"clientSecret" type:"string"` + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by OidcOptions's + // String and GoString methods. + ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"` // The OIDC issuer. Issuer *string `locationName:"issuer" type:"string"` @@ -152725,8 +153753,8 @@ type Placement struct { // Reserved for future use. SpreadDomain *string `locationName:"spreadDomain" type:"string"` - // The tenancy of the instance (if the instance is running in a VPC). An instance - // with a tenancy of dedicated runs on single-tenant hardware. + // The tenancy of the instance. An instance with a tenancy of dedicated runs + // on single-tenant hardware. // // This parameter is not supported for CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). // The host tenancy is not supported for ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) @@ -153729,6 +154757,11 @@ type ProcessorInfo struct { // The architectures supported by the instance type. SupportedArchitectures []*string `locationName:"supportedArchitectures" locationNameList:"item" type:"list" enum:"ArchitectureType"` + // Indicates whether the instance type supports AMD SEV-SNP. If the request + // returns amd-sev-snp, AMD SEV-SNP is supported. Otherwise, it is not supported. + // For more information, see AMD SEV-SNP (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html). + SupportedFeatures []*string `locationName:"supportedFeatures" locationNameList:"item" type:"list" enum:"SupportedAdditionalProcessorFeature"` + // The speed of the processor, in GHz. SustainedClockSpeedInGhz *float64 `locationName:"sustainedClockSpeedInGhz" type:"double"` } @@ -153757,6 +154790,12 @@ func (s *ProcessorInfo) SetSupportedArchitectures(v []*string) *ProcessorInfo { return s } +// SetSupportedFeatures sets the SupportedFeatures field's value. +func (s *ProcessorInfo) SetSupportedFeatures(v []*string) *ProcessorInfo { + s.SupportedFeatures = v + return s +} + // SetSustainedClockSpeedInGhz sets the SustainedClockSpeedInGhz field's value. func (s *ProcessorInfo) SetSustainedClockSpeedInGhz(v float64) *ProcessorInfo { s.SustainedClockSpeedInGhz = &v @@ -155631,7 +156670,9 @@ type RegisterInstanceEventNotificationAttributesInput struct { DryRun *bool `type:"boolean"` // Information about the tag keys to register. 
- InstanceTagAttribute *RegisterInstanceTagAttributeRequest `type:"structure"` + // + // InstanceTagAttribute is a required field + InstanceTagAttribute *RegisterInstanceTagAttributeRequest `type:"structure" required:"true"` } // String returns the string representation. @@ -155652,6 +156693,19 @@ func (s RegisterInstanceEventNotificationAttributesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterInstanceEventNotificationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterInstanceEventNotificationAttributesInput"} + if s.InstanceTagAttribute == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceTagAttribute")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetDryRun sets the DryRun field's value. func (s *RegisterInstanceEventNotificationAttributesInput) SetDryRun(v bool) *RegisterInstanceEventNotificationAttributesInput { s.DryRun = &v @@ -156431,7 +157485,7 @@ func (s *RejectVpcPeeringConnectionOutput) SetReturn(v bool) *RejectVpcPeeringCo type ReleaseAddressInput struct { _ struct{} `type:"structure"` - // [EC2-VPC] The allocation ID. Required for EC2-VPC. + // The allocation ID. This parameter is required. AllocationId *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -156450,7 +157504,7 @@ type ReleaseAddressInput struct { // operation on EC2 classic, you receive an InvalidParameterCombination error. NetworkBorderGroup *string `type:"string"` - // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + // Deprecated. PublicIp *string `type:"string"` } @@ -158114,7 +159168,7 @@ func (s *RequestIpamResourceTag) SetValue(v string) *RequestIpamResourceTag { // // You must specify at least one parameter for the launch template data. type RequestLaunchTemplateData struct { - _ struct{} `type:"structure" sensitive:"true"` + _ struct{} `type:"structure"` // The block device mapping. BlockDeviceMappings []*LaunchTemplateBlockDeviceMappingRequest `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` @@ -158188,8 +159242,14 @@ type RequestLaunchTemplateData struct { // // * resolve:ssm:parameter-name:label // - // For more information, see Use a Systems Manager parameter to find an AMI - // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) + // * resolve:ssm:public-parameter + // + // Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager + // parameter. If the launch template will be used by an EC2 Fleet or Spot Fleet, + // you must specify the AMI ID. + // + // For more information, see Use a Systems Manager parameter instead of an AMI + // ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id) // in the Amazon Elastic Compute Cloud User Guide. ImageId *string `type:"string"` @@ -158301,7 +159361,11 @@ type RequestLaunchTemplateData struct { // must be provided in the MIME multi-part archive format (https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive). // For more information, see Amazon EC2 user data in launch templates (https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html) // in the Batch User Guide. 
- UserData *string `type:"string"` + // + // UserData is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RequestLaunchTemplateData's + // String and GoString methods. + UserData *string `type:"string" sensitive:"true"` } // String returns the string representation. @@ -158859,7 +159923,7 @@ func (s *RequestSpotInstancesInput) SetValidUntil(v time.Time) *RequestSpotInsta type RequestSpotInstancesOutput struct { _ struct{} `type:"structure"` - // One or more Spot Instance requests. + // The Spot Instance requests. SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` } @@ -158894,8 +159958,8 @@ type RequestSpotLaunchSpecification struct { // Deprecated. AddressingType *string `locationName:"addressingType" type:"string"` - // One or more block device mapping entries. You can't specify both a snapshot - // ID and an encryption value. This is because only blank volumes can be encrypted + // The block device mapping entries. You can't specify both a snapshot ID and + // an encryption value. This is because only blank volumes can be encrypted // on creation. If a snapshot is the basis for a volume, it is not blank and // its encryption status is used for the volume encryption status. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` @@ -158929,8 +159993,8 @@ type RequestSpotLaunchSpecification struct { // Default: Disabled Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` - // One or more network interfaces. If you specify a network interface, you must - // specify subnet IDs and security group IDs using the network interface. + // The network interfaces. If you specify a network interface, you must specify + // subnet IDs and security group IDs using the network interface. NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"NetworkInterface" locationNameList:"item" type:"list"` // The placement information for the instance. @@ -158939,12 +160003,10 @@ type RequestSpotLaunchSpecification struct { // The ID of the RAM disk. RamdiskId *string `locationName:"ramdiskId" type:"string"` - // One or more security group IDs. + // The IDs of the security groups. SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` - // One or more security groups. When requesting instances in a VPC, you must - // specify the IDs of the security groups. When requesting instances in EC2-Classic, - // you can specify the names or the IDs of the security groups. + // Not supported. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` // The ID of the subnet in which to launch the instance. @@ -159094,7 +160156,7 @@ func (s *RequestSpotLaunchSpecification) SetUserData(v string) *RequestSpotLaunc type Reservation struct { _ struct{} `type:"structure"` - // [EC2-Classic only] The security groups. + // Not supported. Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` // The instances. @@ -159614,8 +160676,7 @@ type ReservedInstancesConfiguration struct { // The instance type for the modified Reserved Instances. InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` - // The network platform of the modified Reserved Instances, which is either - // EC2-Classic or EC2-VPC. + // The network platform of the modified Reserved Instances. 
Platform *string `locationName:"platform" type:"string"` // Whether the Reserved Instance is applied to instances in a Region or instances @@ -162944,8 +164005,9 @@ type RunInstancesInput struct { // same instance. EnclaveOptions *EnclaveOptionsRequest `type:"structure"` - // Indicates whether an instance is enabled for hibernation. For more information, - // see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // Indicates whether an instance is enabled for hibernation. This parameter + // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html). + // For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon EC2 User Guide. // // You can't enable hibernation and Amazon Web Services Nitro Enclaves on the @@ -162974,12 +164036,18 @@ type RunInstancesInput struct { // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) // in the Amazon EC2 User Guide. // + // When you change your EBS-backed instance type, instance restart or replacement + // behavior depends on the instance type compatibility between the old and new + // types. An instance that's backed by an instance store volume is always replaced. + // For more information, see Change the instance type (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-resize.html) + // in the Amazon EC2 User Guide. + // // Default: m1.small InstanceType *string `type:"string" enum:"InstanceType"` - // [EC2-VPC] The number of IPv6 addresses to associate with the primary network - // interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. - // You cannot specify this option and the option to assign specific IPv6 addresses + // The number of IPv6 addresses to associate with the primary network interface. + // Amazon EC2 chooses the IPv6 addresses from the range of your subnet. You + // cannot specify this option and the option to assign specific IPv6 addresses // in the same request. You can specify this option if you've specified a minimum // number of instances to launch. // @@ -162987,10 +164055,10 @@ type RunInstancesInput struct { // request. Ipv6AddressCount *int64 `type:"integer"` - // [EC2-VPC] The IPv6 addresses from the range of the subnet to associate with - // the primary network interface. You cannot specify this option and the option - // to assign a number of IPv6 addresses in the same request. You cannot specify - // this option if you've specified a minimum number of instances to launch. + // The IPv6 addresses from the range of the subnet to associate with the primary + // network interface. You cannot specify this option and the option to assign + // a number of IPv6 addresses in the same request. You cannot specify this option + // if you've specified a minimum number of instances to launch. // // You cannot specify this option and the network interfaces option in the same // request. @@ -163064,8 +164132,8 @@ type RunInstancesInput struct { // the subnet. PrivateDnsNameOptions *PrivateDnsNameOptionsRequest `type:"structure"` - // [EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 - // address range of the subnet. + // The primary IPv4 address. You must specify a value from the IPv4 address + // range of the subnet. // // Only one private IP address can be designated as primary. 
You can't specify // this option if you've specified the option to designate a private IP address @@ -163093,7 +164161,7 @@ type RunInstancesInput struct { // as part of the network interface. SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` - // [EC2-Classic, default VPC] The names of the security groups. + // [Default VPC] The names of the security groups. // // If you specify a network interface, you must specify any security groups // as part of the network interface. @@ -163101,7 +164169,7 @@ type RunInstancesInput struct { // Default: Amazon EC2 uses the default security group. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` - // [EC2-VPC] The ID of the subnet to launch the instance into. + // The ID of the subnet to launch the instance into. // // If you specify a network interface, you must specify any subnets as part // of the network interface. @@ -163769,7 +164837,7 @@ type ScheduledInstance struct { // The instance type. InstanceType *string `locationName:"instanceType" type:"string"` - // The network platform (EC2-Classic or EC2-VPC). + // The network platform. NetworkPlatform *string `locationName:"networkPlatform" type:"string"` // The time for the next schedule to start. @@ -163934,7 +165002,7 @@ type ScheduledInstanceAvailability struct { // The minimum term. The only possible value is 365 days. MinTermDurationInDays *int64 `locationName:"minTermDurationInDays" type:"integer"` - // The network platform (EC2-Classic or EC2-VPC). + // The network platform. NetworkPlatform *string `locationName:"networkPlatform" type:"string"` // The platform (Linux/UNIX or Windows). @@ -166628,7 +167696,11 @@ type SnapshotDetail struct { StatusMessage *string `locationName:"statusMessage" type:"string"` // The URL used to access the disk image. - Url *string `locationName:"url" type:"string"` + // + // Url is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SnapshotDetail's + // String and GoString methods. + Url *string `locationName:"url" type:"string" sensitive:"true"` // The Amazon S3 bucket for the disk image. UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` @@ -166726,7 +167798,11 @@ type SnapshotDiskContainer struct { // The URL to the Amazon S3-based disk image being imported. It can either be // a https URL (https://..) or an Amazon S3 URL (s3://..). - Url *string `type:"string"` + // + // Url is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SnapshotDiskContainer's + // String and GoString methods. + Url *string `type:"string" sensitive:"true"` // The Amazon S3 bucket for the disk image. UserBucket *UserBucket `type:"structure"` @@ -167001,7 +168077,11 @@ type SnapshotTaskDetail struct { StatusMessage *string `locationName:"statusMessage" type:"string"` // The URL of the disk image from which the snapshot is created. - Url *string `locationName:"url" type:"string"` + // + // Url is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SnapshotTaskDetail's + // String and GoString methods. + Url *string `locationName:"url" type:"string" sensitive:"true"` // The Amazon S3 bucket for the disk image. UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` @@ -167423,9 +168503,7 @@ type SpotFleetLaunchSpecification struct { // Resource Center and search for the kernel ID. 
RamdiskId *string `locationName:"ramdiskId" type:"string"` - // One or more security groups. When requesting instances in a VPC, you must - // specify the IDs of the security groups. When requesting instances in EC2-Classic, - // you can specify the names or the IDs of the security groups. + // The security groups. SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` // The maximum price per unit hour that you are willing to pay for a Spot Instance. @@ -175275,7 +176353,11 @@ type TunnelOption struct { // The pre-shared key (PSK) to establish initial authentication between the // virtual private gateway and the customer gateway. - PreSharedKey *string `locationName:"preSharedKey" type:"string"` + // + // PreSharedKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by TunnelOption's + // String and GoString methods. + PreSharedKey *string `locationName:"preSharedKey" type:"string" sensitive:"true"` // The percentage of the rekey window determined by RekeyMarginTimeSeconds during // which the rekey time is randomly selected. @@ -177529,16 +178611,24 @@ func (s *VerifiedAccessLogKinesisDataFirehoseDestinationOptions) SetEnabled(v bo return s } -// Describes the destinations for Verified Access logs. +// Options for Verified Access logs. type VerifiedAccessLogOptions struct { _ struct{} `type:"structure"` // Sends Verified Access logs to CloudWatch Logs. CloudWatchLogs *VerifiedAccessLogCloudWatchLogsDestinationOptions `type:"structure"` + // Include trust data sent by trust providers into the logs. + IncludeTrustContext *bool `type:"boolean"` + // Sends Verified Access logs to Kinesis. KinesisDataFirehose *VerifiedAccessLogKinesisDataFirehoseDestinationOptions `type:"structure"` + // The logging version to use. + // + // Valid values: ocsf-0.1 | ocsf-1.0.0-rc.2 + LogVersion *string `type:"string"` + // Sends Verified Access logs to Amazon S3. S3 *VerifiedAccessLogS3DestinationOptions `type:"structure"` } @@ -177592,12 +178682,24 @@ func (s *VerifiedAccessLogOptions) SetCloudWatchLogs(v *VerifiedAccessLogCloudWa return s } +// SetIncludeTrustContext sets the IncludeTrustContext field's value. +func (s *VerifiedAccessLogOptions) SetIncludeTrustContext(v bool) *VerifiedAccessLogOptions { + s.IncludeTrustContext = &v + return s +} + // SetKinesisDataFirehose sets the KinesisDataFirehose field's value. func (s *VerifiedAccessLogOptions) SetKinesisDataFirehose(v *VerifiedAccessLogKinesisDataFirehoseDestinationOptions) *VerifiedAccessLogOptions { s.KinesisDataFirehose = v return s } +// SetLogVersion sets the LogVersion field's value. +func (s *VerifiedAccessLogOptions) SetLogVersion(v string) *VerifiedAccessLogOptions { + s.LogVersion = &v + return s +} + // SetS3 sets the S3 field's value. func (s *VerifiedAccessLogOptions) SetS3(v *VerifiedAccessLogS3DestinationOptions) *VerifiedAccessLogOptions { s.S3 = v @@ -177746,16 +178848,22 @@ func (s *VerifiedAccessLogS3DestinationOptions) SetPrefix(v string) *VerifiedAcc return s } -// Describes the destinations for Verified Access logs. +// Describes the options for Verified Access logs. type VerifiedAccessLogs struct { _ struct{} `type:"structure"` // CloudWatch Logs logging destination. CloudWatchLogs *VerifiedAccessLogCloudWatchLogsDestination `locationName:"cloudWatchLogs" type:"structure"` + // Describes current setting for including trust data into the logs. 
+ IncludeTrustContext *bool `locationName:"includeTrustContext" type:"boolean"` + // Kinesis logging destination. KinesisDataFirehose *VerifiedAccessLogKinesisDataFirehoseDestination `locationName:"kinesisDataFirehose" type:"structure"` + // Describes current setting for the logging version. + LogVersion *string `locationName:"logVersion" type:"string"` + // Amazon S3 logging options. S3 *VerifiedAccessLogS3Destination `locationName:"s3" type:"structure"` } @@ -177784,12 +178892,24 @@ func (s *VerifiedAccessLogs) SetCloudWatchLogs(v *VerifiedAccessLogCloudWatchLog return s } +// SetIncludeTrustContext sets the IncludeTrustContext field's value. +func (s *VerifiedAccessLogs) SetIncludeTrustContext(v bool) *VerifiedAccessLogs { + s.IncludeTrustContext = &v + return s +} + // SetKinesisDataFirehose sets the KinesisDataFirehose field's value. func (s *VerifiedAccessLogs) SetKinesisDataFirehose(v *VerifiedAccessLogKinesisDataFirehoseDestination) *VerifiedAccessLogs { s.KinesisDataFirehose = v return s } +// SetLogVersion sets the LogVersion field's value. +func (s *VerifiedAccessLogs) SetLogVersion(v string) *VerifiedAccessLogs { + s.LogVersion = &v + return s +} + // SetS3 sets the S3 field's value. func (s *VerifiedAccessLogs) SetS3(v *VerifiedAccessLogS3Destination) *VerifiedAccessLogs { s.S3 = v @@ -177806,7 +178926,7 @@ type VerifiedAccessTrustProvider struct { // A description for the Amazon Web Services Verified Access trust provider. Description *string `locationName:"description" type:"string"` - // The options for device-identity type trust provider. + // The options for device-identity trust provider. DeviceOptions *DeviceOptions `locationName:"deviceOptions" type:"structure"` // The type of device-based trust provider. @@ -177815,7 +178935,7 @@ type VerifiedAccessTrustProvider struct { // The last updated time. LastUpdatedTime *string `locationName:"lastUpdatedTime" type:"string"` - // The OpenID Connect details for an oidc-type, user-identity based trust provider. + // The options for an OpenID Connect-compatible user-identity trust provider. OidcOptions *OidcOptions `locationName:"oidcOptions" type:"structure"` // The identifier to be used when working with policy rules. @@ -179844,7 +180964,11 @@ type VpnConnection struct { // the native XML format). This element is always present in the CreateVpnConnection // response; however, it's present in the DescribeVpnConnections response only // if the VPN connection is in the pending or available state. - CustomerGatewayConfiguration *string `locationName:"customerGatewayConfiguration" type:"string"` + // + // CustomerGatewayConfiguration is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by VpnConnection's + // String and GoString methods. + CustomerGatewayConfiguration *string `locationName:"customerGatewayConfiguration" type:"string" sensitive:"true"` // The ID of the customer gateway at your end of the VPN connection. CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` @@ -180594,7 +181718,11 @@ type VpnTunnelOptionsSpecification struct { // Constraints: Allowed characters are alphanumeric characters, periods (.), // and underscores (_). Must be between 8 and 64 characters in length and cannot // start with zero (0). - PreSharedKey *string `type:"string"` + // + // PreSharedKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by VpnTunnelOptionsSpecification's + // String and GoString methods. 
+ PreSharedKey *string `type:"string" sensitive:"true"` // The percentage of the rekey window (determined by RekeyMarginTimeSeconds) // during which the rekey time is randomly selected. @@ -181167,6 +182295,22 @@ func AllowsMultipleInstanceTypes_Values() []string { } } +const ( + // AmdSevSnpSpecificationEnabled is a AmdSevSnpSpecification enum value + AmdSevSnpSpecificationEnabled = "enabled" + + // AmdSevSnpSpecificationDisabled is a AmdSevSnpSpecification enum value + AmdSevSnpSpecificationDisabled = "disabled" +) + +// AmdSevSnpSpecification_Values returns all elements of the AmdSevSnpSpecification enum +func AmdSevSnpSpecification_Values() []string { + return []string{ + AmdSevSnpSpecificationEnabled, + AmdSevSnpSpecificationDisabled, + } +} + const ( // AnalysisStatusRunning is a AnalysisStatus enum value AnalysisStatusRunning = "running" @@ -182487,6 +183631,38 @@ func EbsOptimizedSupport_Values() []string { } } +const ( + // Ec2InstanceConnectEndpointStateCreateInProgress is a Ec2InstanceConnectEndpointState enum value + Ec2InstanceConnectEndpointStateCreateInProgress = "create-in-progress" + + // Ec2InstanceConnectEndpointStateCreateComplete is a Ec2InstanceConnectEndpointState enum value + Ec2InstanceConnectEndpointStateCreateComplete = "create-complete" + + // Ec2InstanceConnectEndpointStateCreateFailed is a Ec2InstanceConnectEndpointState enum value + Ec2InstanceConnectEndpointStateCreateFailed = "create-failed" + + // Ec2InstanceConnectEndpointStateDeleteInProgress is a Ec2InstanceConnectEndpointState enum value + Ec2InstanceConnectEndpointStateDeleteInProgress = "delete-in-progress" + + // Ec2InstanceConnectEndpointStateDeleteComplete is a Ec2InstanceConnectEndpointState enum value + Ec2InstanceConnectEndpointStateDeleteComplete = "delete-complete" + + // Ec2InstanceConnectEndpointStateDeleteFailed is a Ec2InstanceConnectEndpointState enum value + Ec2InstanceConnectEndpointStateDeleteFailed = "delete-failed" +) + +// Ec2InstanceConnectEndpointState_Values returns all elements of the Ec2InstanceConnectEndpointState enum +func Ec2InstanceConnectEndpointState_Values() []string { + return []string{ + Ec2InstanceConnectEndpointStateCreateInProgress, + Ec2InstanceConnectEndpointStateCreateComplete, + Ec2InstanceConnectEndpointStateCreateFailed, + Ec2InstanceConnectEndpointStateDeleteInProgress, + Ec2InstanceConnectEndpointStateDeleteComplete, + Ec2InstanceConnectEndpointStateDeleteFailed, + } +} + const ( // ElasticGpuStateAttached is a ElasticGpuState enum value ElasticGpuStateAttached = "ATTACHED" @@ -185600,6 +186776,72 @@ const ( // InstanceTypeR6idnMetal is a InstanceType enum value InstanceTypeR6idnMetal = "r6idn.metal" + + // InstanceTypeInf2Xlarge is a InstanceType enum value + InstanceTypeInf2Xlarge = "inf2.xlarge" + + // InstanceTypeInf28xlarge is a InstanceType enum value + InstanceTypeInf28xlarge = "inf2.8xlarge" + + // InstanceTypeInf224xlarge is a InstanceType enum value + InstanceTypeInf224xlarge = "inf2.24xlarge" + + // InstanceTypeInf248xlarge is a InstanceType enum value + InstanceTypeInf248xlarge = "inf2.48xlarge" + + // InstanceTypeTrn1n32xlarge is a InstanceType enum value + InstanceTypeTrn1n32xlarge = "trn1n.32xlarge" + + // InstanceTypeI4gLarge is a InstanceType enum value + InstanceTypeI4gLarge = "i4g.large" + + // InstanceTypeI4gXlarge is a InstanceType enum value + InstanceTypeI4gXlarge = "i4g.xlarge" + + // InstanceTypeI4g2xlarge is a InstanceType enum value + InstanceTypeI4g2xlarge = "i4g.2xlarge" + + // InstanceTypeI4g4xlarge is a InstanceType enum 
value + InstanceTypeI4g4xlarge = "i4g.4xlarge" + + // InstanceTypeI4g8xlarge is a InstanceType enum value + InstanceTypeI4g8xlarge = "i4g.8xlarge" + + // InstanceTypeI4g16xlarge is a InstanceType enum value + InstanceTypeI4g16xlarge = "i4g.16xlarge" + + // InstanceTypeHpc7g4xlarge is a InstanceType enum value + InstanceTypeHpc7g4xlarge = "hpc7g.4xlarge" + + // InstanceTypeHpc7g8xlarge is a InstanceType enum value + InstanceTypeHpc7g8xlarge = "hpc7g.8xlarge" + + // InstanceTypeHpc7g16xlarge is a InstanceType enum value + InstanceTypeHpc7g16xlarge = "hpc7g.16xlarge" + + // InstanceTypeC7gnMedium is a InstanceType enum value + InstanceTypeC7gnMedium = "c7gn.medium" + + // InstanceTypeC7gnLarge is a InstanceType enum value + InstanceTypeC7gnLarge = "c7gn.large" + + // InstanceTypeC7gnXlarge is a InstanceType enum value + InstanceTypeC7gnXlarge = "c7gn.xlarge" + + // InstanceTypeC7gn2xlarge is a InstanceType enum value + InstanceTypeC7gn2xlarge = "c7gn.2xlarge" + + // InstanceTypeC7gn4xlarge is a InstanceType enum value + InstanceTypeC7gn4xlarge = "c7gn.4xlarge" + + // InstanceTypeC7gn8xlarge is a InstanceType enum value + InstanceTypeC7gn8xlarge = "c7gn.8xlarge" + + // InstanceTypeC7gn12xlarge is a InstanceType enum value + InstanceTypeC7gn12xlarge = "c7gn.12xlarge" + + // InstanceTypeC7gn16xlarge is a InstanceType enum value + InstanceTypeC7gn16xlarge = "c7gn.16xlarge" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -186248,6 +187490,28 @@ func InstanceType_Values() []string { InstanceTypeM6idnMetal, InstanceTypeR6inMetal, InstanceTypeR6idnMetal, + InstanceTypeInf2Xlarge, + InstanceTypeInf28xlarge, + InstanceTypeInf224xlarge, + InstanceTypeInf248xlarge, + InstanceTypeTrn1n32xlarge, + InstanceTypeI4gLarge, + InstanceTypeI4gXlarge, + InstanceTypeI4g2xlarge, + InstanceTypeI4g4xlarge, + InstanceTypeI4g8xlarge, + InstanceTypeI4g16xlarge, + InstanceTypeHpc7g4xlarge, + InstanceTypeHpc7g8xlarge, + InstanceTypeHpc7g16xlarge, + InstanceTypeC7gnMedium, + InstanceTypeC7gnLarge, + InstanceTypeC7gnXlarge, + InstanceTypeC7gn2xlarge, + InstanceTypeC7gn4xlarge, + InstanceTypeC7gn8xlarge, + InstanceTypeC7gn12xlarge, + InstanceTypeC7gn16xlarge, } } @@ -187571,6 +188835,38 @@ func NetworkInterfaceType_Values() []string { } } +const ( + // NitroEnclavesSupportUnsupported is a NitroEnclavesSupport enum value + NitroEnclavesSupportUnsupported = "unsupported" + + // NitroEnclavesSupportSupported is a NitroEnclavesSupport enum value + NitroEnclavesSupportSupported = "supported" +) + +// NitroEnclavesSupport_Values returns all elements of the NitroEnclavesSupport enum +func NitroEnclavesSupport_Values() []string { + return []string{ + NitroEnclavesSupportUnsupported, + NitroEnclavesSupportSupported, + } +} + +const ( + // NitroTpmSupportUnsupported is a NitroTpmSupport enum value + NitroTpmSupportUnsupported = "unsupported" + + // NitroTpmSupportSupported is a NitroTpmSupport enum value + NitroTpmSupportSupported = "supported" +) + +// NitroTpmSupport_Values returns all elements of the NitroTpmSupport enum +func NitroTpmSupport_Values() []string { + return []string{ + NitroTpmSupportUnsupported, + NitroTpmSupportSupported, + } +} + const ( // OfferingClassTypeStandard is a OfferingClassType enum value OfferingClassTypeStandard = "standard" @@ -188438,6 +189734,9 @@ const ( // ResourceTypeIpamResourceDiscoveryAssociation is a ResourceType enum value ResourceTypeIpamResourceDiscoveryAssociation = "ipam-resource-discovery-association" + + // ResourceTypeInstanceConnectEndpoint is a ResourceType 
enum value + ResourceTypeInstanceConnectEndpoint = "instance-connect-endpoint" ) // ResourceType_Values returns all elements of the ResourceType enum @@ -188528,6 +189827,7 @@ func ResourceType_Values() []string { ResourceTypeVpcBlockPublicAccessExclusion, ResourceTypeIpamResourceDiscovery, ResourceTypeIpamResourceDiscoveryAssociation, + ResourceTypeInstanceConnectEndpoint, } } @@ -189123,6 +190423,18 @@ func SummaryStatus_Values() []string { } } +const ( + // SupportedAdditionalProcessorFeatureAmdSevSnp is a SupportedAdditionalProcessorFeature enum value + SupportedAdditionalProcessorFeatureAmdSevSnp = "amd-sev-snp" +) + +// SupportedAdditionalProcessorFeature_Values returns all elements of the SupportedAdditionalProcessorFeature enum +func SupportedAdditionalProcessorFeature_Values() []string { + return []string{ + SupportedAdditionalProcessorFeatureAmdSevSnp, + } +} + const ( // TargetCapacityUnitTypeVcpu is a TargetCapacityUnitType enum value TargetCapacityUnitTypeVcpu = "vcpu" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go index 5b5395356..621712d29 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -11,6 +11,9 @@ import ( ) const ( + // ec2CopySnapshotPresignedUrlCustomization handler name + ec2CopySnapshotPresignedUrlCustomization = "ec2CopySnapshotPresignedUrl" + // customRetryerMinRetryDelay sets min retry delay customRetryerMinRetryDelay = 1 * time.Second @@ -21,7 +24,10 @@ const ( func init() { initRequest = func(r *request.Request) { if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter - r.Handlers.Build.PushFront(fillPresignedURL) + r.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: ec2CopySnapshotPresignedUrlCustomization, + Fn: fillPresignedURL, + }) } // only set the retryer on request if config doesn't have a retryer @@ -48,13 +54,15 @@ func fillPresignedURL(r *request.Request) { origParams := r.Params.(*CopySnapshotInput) - // Stop if PresignedURL/DestinationRegion is set - if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil { + // Stop if PresignedURL is set + if origParams.PresignedUrl != nil { return } + // Always use config region as destination region for SDKs origParams.DestinationRegion = r.Config.Region - newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput) + + newParams := awsutil.CopyOf(origParams).(*CopySnapshotInput) // Create a new request based on the existing request. We will use this to // presign the CopySnapshot request against the source region. @@ -82,8 +90,12 @@ func fillPresignedURL(r *request.Request) { clientInfo.Endpoint = resolved.URL clientInfo.SigningRegion = resolved.SigningRegion + // Copy handlers without Presigned URL customization to avoid an infinite loop + handlersWithoutPresignCustomization := r.Handlers.Copy() + handlersWithoutPresignCustomization.Build.RemoveByName(ec2CopySnapshotPresignedUrlCustomization) + // Presign a CopySnapshot request with modified params - req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data) + req := request.New(*cfg, clientInfo, handlersWithoutPresignCustomization, r.Retryer, r.Operation, newParams, r.Data) url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough. 
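	// Note: the request presigned above uses the copied handler list with this
	// named customization removed, so fillPresignedURL does not run again for the
	// source-region request (that re-entry is the infinite loop the copy avoids).
	// The resulting URL is signed against the resolved source-region endpoint and
	// is only valid for the five-minute window requested above.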
if err != nil { // bubble error back up to original request r.Error = err diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go index fce3db782..a596fcd8d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go @@ -7603,7 +7603,7 @@ func (c *Lightsail) GetCertificatesRequest(input *GetCertificatesInput) (req *re // // Returns information about one or more Amazon Lightsail SSL/TLS certificates. // -// To get a summary of a certificate, ommit includeCertificateDetails from your +// To get a summary of a certificate, omit includeCertificateDetails from your // request. The response will include only the certificate Amazon Resource Name // (ARN), certificate name, domain name, and tags. // @@ -19655,7 +19655,7 @@ func (s *CacheSettings) SetMinimumTTL(v int64) *CacheSettings { // Describes the full details of an Amazon Lightsail SSL/TLS certificate. // -// To get a summary of a certificate, use the GetCertificates action and ommit +// To get a summary of a certificate, use the GetCertificates action and omit // includeCertificateDetails from your request. The response will include only // the certificate Amazon Resource Name (ARN), certificate name, domain name, // and tags. @@ -30007,6 +30007,13 @@ type GetCertificatesInput struct { // When omitted, the response includes only the certificate names, Amazon Resource // Names (ARNs), domain names, and tags. IncludeCertificateDetails *bool `locationName:"includeCertificateDetails" type:"boolean"` + + // The token to advance to the next page of results from your request. + // + // To get a page token, perform an initial GetCertificates request. If your + // results are paginated, the response will return a next page token that you + // can specify as the page token in a subsequent request. + PageToken *string `locationName:"pageToken" type:"string"` } // String returns the string representation. @@ -30045,11 +30052,23 @@ func (s *GetCertificatesInput) SetIncludeCertificateDetails(v bool) *GetCertific return s } +// SetPageToken sets the PageToken field's value. +func (s *GetCertificatesInput) SetPageToken(v string) *GetCertificatesInput { + s.PageToken = &v + return s +} + type GetCertificatesOutput struct { _ struct{} `type:"structure"` // An object that describes certificates. Certificates []*CertificateSummary `locationName:"certificates" type:"list"` + + // If NextPageToken is returned there are more results available. The value + // of NextPageToken is a unique pagination token for each page. Make the call + // again using the returned token to retrieve the next page. Keep all other + // arguments unchanged. + NextPageToken *string `locationName:"nextPageToken" type:"string"` } // String returns the string representation. @@ -30076,6 +30095,12 @@ func (s *GetCertificatesOutput) SetCertificates(v []*CertificateSummary) *GetCer return s } +// SetNextPageToken sets the NextPageToken field's value. +func (s *GetCertificatesOutput) SetNextPageToken(v string) *GetCertificatesOutput { + s.NextPageToken = &v + return s +} + type GetCloudFormationStackRecordsInput struct { _ struct{} `type:"structure"` @@ -30965,7 +30990,7 @@ type GetCostEstimateInput struct { // // * Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time format. For example, if you wish to use an + // * Specified in the Unix time format. 
For example, if you want to use an // end time of October 1, 2018, at 9 PM UTC, specify 1538427600 as the end // time. // @@ -30986,7 +31011,7 @@ type GetCostEstimateInput struct { // // * Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time format. For example, if you wish to use a + // * Specified in the Unix time format. For example, if you want to use a // start time of October 1, 2018, at 8 PM UTC, specify 1538424000 as the // start time. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go new file mode 100644 index 000000000..c743913c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -0,0 +1,1682 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const opCreateToken = "CreateToken" + +// CreateTokenRequest generates a "aws/request.Request" representing the +// client's request for the CreateToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateToken for more information on using the CreateToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenRequest method. +// req, resp := client.CreateTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Request, output *CreateTokenOutput) { + op := &request.Operation{ + Name: opCreateToken, + HTTPMethod: "POST", + HTTPPath: "/token", + } + + if input == nil { + input = &CreateTokenInput{} + } + + output = &CreateTokenOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// CreateToken API operation for AWS SSO OIDC. +// +// Creates and returns an access token for the authorized client. The access +// token issued will be used to fetch short-term credentials for the assigned +// roles in the AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateToken for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. 
+// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateToken(input *CreateTokenInput) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + return out, req.Send() +} + +// CreateTokenWithContext is the same as CreateToken with the addition of +// the ability to pass a context and additional request options. +// +// See CreateToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInput, opts ...request.Option) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterClient = "RegisterClient" + +// RegisterClientRequest generates a "aws/request.Request" representing the +// client's request for the RegisterClient operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterClient for more information on using the RegisterClient +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RegisterClientRequest method. 
+// req, resp := client.RegisterClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *request.Request, output *RegisterClientOutput) { + op := &request.Operation{ + Name: opRegisterClient, + HTTPMethod: "POST", + HTTPPath: "/client/register", + } + + if input == nil { + input = &RegisterClientInput{} + } + + output = &RegisterClientOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// RegisterClient API operation for AWS SSO OIDC. +// +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation RegisterClient for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - InvalidClientMetadataException +// Indicates that the client information sent in the request during registration +// is invalid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + return out, req.Send() +} + +// RegisterClientWithContext is the same as RegisterClient with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterClient for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) RegisterClientWithContext(ctx aws.Context, input *RegisterClientInput, opts ...request.Option) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartDeviceAuthorization = "StartDeviceAuthorization" + +// StartDeviceAuthorizationRequest generates a "aws/request.Request" representing the +// client's request for the StartDeviceAuthorization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartDeviceAuthorization for more information on using the StartDeviceAuthorization +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartDeviceAuthorizationRequest method. +// req, resp := client.StartDeviceAuthorizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorizationRequest(input *StartDeviceAuthorizationInput) (req *request.Request, output *StartDeviceAuthorizationOutput) { + op := &request.Operation{ + Name: opStartDeviceAuthorization, + HTTPMethod: "POST", + HTTPPath: "/device_authorization", + } + + if input == nil { + input = &StartDeviceAuthorizationInput{} + } + + output = &StartDeviceAuthorizationOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// StartDeviceAuthorization API operation for AWS SSO OIDC. +// +// Initiates device authorization by requesting a pair of verification codes +// from the authorization service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation StartDeviceAuthorization for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorization(input *StartDeviceAuthorizationInput) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + return out, req.Send() +} + +// StartDeviceAuthorizationWithContext is the same as StartDeviceAuthorization with the addition of +// the ability to pass a context and additional request options. +// +// See StartDeviceAuthorization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) StartDeviceAuthorizationWithContext(ctx aws.Context, input *StartDeviceAuthorizationInput, opts ...request.Option) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request to authorize a client with an access user session +// token is pending. +type AuthorizationPendingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) GoString() string { + return s.String() +} + +func newErrorAuthorizationPendingException(v protocol.ResponseMetadata) error { + return &AuthorizationPendingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *AuthorizationPendingException) Code() string { + return "AuthorizationPendingException" +} + +// Message returns the exception's message. +func (s *AuthorizationPendingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AuthorizationPendingException) OrigErr() error { + return nil +} + +func (s *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AuthorizationPendingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AuthorizationPendingException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateTokenInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for each client. This value should come from + // the persisted result of the RegisterClient API. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + + // The authorization code received from the authorization service. This parameter + // is required to perform an authorization grant request to get access to a + // token. + Code *string `locationName:"code" type:"string"` + + // Used only when calling this API for the device code grant type. This short-term + // code is used to identify this authentication attempt. This should come from + // an in-memory reference to the result of the StartDeviceAuthorization API. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Supports grant types for the authorization code, refresh token, and device + // code request. For device code requests, specify the following value: + // + // urn:ietf:params:oauth:grant-type:device_code + // + // For information about how to obtain the device code, see the StartDeviceAuthorization + // topic. + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // The location of the application that will receive the authorization code. + // Users authorize the service to send the request to this location. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Currently, refreshToken is not yet implemented and is not supported. For + // more information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the + // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // The token used to obtain an access token in the event that the access token + // is invalid or expired. + RefreshToken *string `locationName:"refreshToken" type:"string"` + + // The list of scopes that is defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scope []*string `locationName:"scope" type:"list"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenInput) SetClientId(v string) *CreateTokenInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *CreateTokenInput) SetClientSecret(v string) *CreateTokenInput { + s.ClientSecret = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { + s.Code = &v + return s +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { + s.DeviceCode = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenInput) SetGrantType(v string) *CreateTokenInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenInput) SetRedirectUri(v string) *CreateTokenInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenInput) SetRefreshToken(v string) *CreateTokenInput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { + s.Scope = v + return s +} + +type CreateTokenOutput struct { + _ struct{} `type:"structure"` + + // An opaque token to access IAM Identity Center resources assigned to a user. + AccessToken *string `locationName:"accessToken" type:"string"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Currently, idToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the + // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // The identifier of the user that associated with the access token, if present. + IdToken *string `locationName:"idToken" type:"string"` + + // Currently, refreshToken is not yet implemented and is not supported. 
For + // more information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the + // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + RefreshToken *string `locationName:"refreshToken" type:"string"` + + // Used to notify the client that the returned token is an access token. The + // supported type is BearerToken. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenOutput) SetAccessToken(v string) *CreateTokenOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenOutput) SetExpiresIn(v int64) *CreateTokenOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenOutput) SetIdToken(v string) *CreateTokenOutput { + s.IdToken = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenOutput) SetRefreshToken(v string) *CreateTokenOutput { + s.RefreshToken = &v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput { + s.TokenType = &v + return s +} + +// Indicates that the token issued by the service is expired and is no longer +// valid. +type ExpiredTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) GoString() string { + return s.String() +} + +func newErrorExpiredTokenException(v protocol.ResponseMetadata) error { + return &ExpiredTokenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExpiredTokenException) Code() string { + return "ExpiredTokenException" +} + +// Message returns the exception's message. 
+func (s *ExpiredTokenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExpiredTokenException) OrigErr() error { + return nil +} + +func (s *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExpiredTokenException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExpiredTokenException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that an error from the service occurred while trying to process +// a request. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +type InvalidClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) GoString() string { + return s.String() +} + +func newErrorInvalidClientException(v protocol.ResponseMetadata) error { + return &InvalidClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientException) Code() string { + return "InvalidClientException" +} + +// Message returns the exception's message. +func (s *InvalidClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientException) OrigErr() error { + return nil +} + +func (s *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the client information sent in the request during registration +// is invalid. +type InvalidClientMetadataException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) GoString() string { + return s.String() +} + +func newErrorInvalidClientMetadataException(v protocol.ResponseMetadata) error { + return &InvalidClientMetadataException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientMetadataException) Code() string { + return "InvalidClientMetadataException" +} + +// Message returns the exception's message. +func (s *InvalidClientMetadataException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientMetadataException) OrigErr() error { + return nil +} + +func (s *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InvalidClientMetadataException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientMetadataException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) GoString() string { + return s.String() +} + +func newErrorInvalidGrantException(v protocol.ResponseMetadata) error { + return &InvalidGrantException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidGrantException) Code() string { + return "InvalidGrantException" +} + +// Message returns the exception's message. +func (s *InvalidGrantException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidGrantException) OrigErr() error { + return nil +} + +func (s *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidGrantException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidGrantException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the scope provided in the request is invalid. +type InvalidScopeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) GoString() string { + return s.String() +} + +func newErrorInvalidScopeException(v protocol.ResponseMetadata) error { + return &InvalidScopeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidScopeException) Code() string { + return "InvalidScopeException" +} + +// Message returns the exception's message. +func (s *InvalidScopeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidScopeException) OrigErr() error { + return nil +} + +func (s *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidScopeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidScopeException) RequestID() string { + return s.RespMetadata.RequestID +} + +type RegisterClientInput struct { + _ struct{} `type:"structure"` + + // The friendly name of the client. + // + // ClientName is a required field + ClientName *string `locationName:"clientName" type:"string" required:"true"` + + // The type of client. The service supports only public as a client type. 
Anything + // other than public will be rejected by the service. + // + // ClientType is a required field + ClientType *string `locationName:"clientType" type:"string" required:"true"` + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []*string `locationName:"scopes" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterClientInput"} + if s.ClientName == nil { + invalidParams.Add(request.NewErrParamRequired("ClientName")) + } + if s.ClientType == nil { + invalidParams.Add(request.NewErrParamRequired("ClientType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientName sets the ClientName field's value. +func (s *RegisterClientInput) SetClientName(v string) *RegisterClientInput { + s.ClientName = &v + return s +} + +// SetClientType sets the ClientType field's value. +func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { + s.ClientType = &v + return s +} + +// SetScopes sets the Scopes field's value. +func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { + s.Scopes = v + return s +} + +type RegisterClientOutput struct { + _ struct{} `type:"structure"` + + // The endpoint where the client can request authorization. + AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"` + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string `locationName:"clientId" type:"string"` + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt *int64 `locationName:"clientIdIssuedAt" type:"long"` + + // A secret string generated for the client. The client will use this string + // to get authenticated by the service in subsequent calls. + ClientSecret *string `locationName:"clientSecret" type:"string"` + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"` + + // The endpoint where the client can get an access token. + TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) GoString() string { + return s.String() +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *RegisterClientOutput) SetAuthorizationEndpoint(v string) *RegisterClientOutput { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *RegisterClientOutput) SetClientId(v string) *RegisterClientOutput { + s.ClientId = &v + return s +} + +// SetClientIdIssuedAt sets the ClientIdIssuedAt field's value. +func (s *RegisterClientOutput) SetClientIdIssuedAt(v int64) *RegisterClientOutput { + s.ClientIdIssuedAt = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *RegisterClientOutput) SetClientSecret(v string) *RegisterClientOutput { + s.ClientSecret = &v + return s +} + +// SetClientSecretExpiresAt sets the ClientSecretExpiresAt field's value. +func (s *RegisterClientOutput) SetClientSecretExpiresAt(v int64) *RegisterClientOutput { + s.ClientSecretExpiresAt = &v + return s +} + +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *RegisterClientOutput) SetTokenEndpoint(v string) *RegisterClientOutput { + s.TokenEndpoint = &v + return s +} + +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +type SlowDownException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) GoString() string { + return s.String() +} + +func newErrorSlowDownException(v protocol.ResponseMetadata) error { + return &SlowDownException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *SlowDownException) Code() string { + return "SlowDownException" +} + +// Message returns the exception's message. +func (s *SlowDownException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *SlowDownException) OrigErr() error { + return nil +} + +func (s *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *SlowDownException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *SlowDownException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartDeviceAuthorizationInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the client that is registered with IAM Identity + // Center. This value should come from the persisted result of the RegisterClient + // API operation. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string that is generated for the client. This value should come + // from the persisted result of the RegisterClient API operation. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + + // The URL for the AWS access portal. For more information, see Using the AWS + // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // StartUrl is a required field + StartUrl *string `locationName:"startUrl" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartDeviceAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartDeviceAuthorizationInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.StartUrl == nil { + invalidParams.Add(request.NewErrParamRequired("StartUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *StartDeviceAuthorizationInput) SetClientId(v string) *StartDeviceAuthorizationInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *StartDeviceAuthorizationInput) SetClientSecret(v string) *StartDeviceAuthorizationInput { + s.ClientSecret = &v + return s +} + +// SetStartUrl sets the StartUrl field's value. +func (s *StartDeviceAuthorizationInput) SetStartUrl(v string) *StartDeviceAuthorizationInput { + s.StartUrl = &v + return s +} + +type StartDeviceAuthorizationOutput struct { + _ struct{} `type:"structure"` + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval *int64 `locationName:"interval" type:"integer"` + + // A one-time user verification code. 
This is needed to authorize an in-use + // device. + UserCode *string `locationName:"userCode" type:"string"` + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string `locationName:"verificationUri" type:"string"` + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string `locationName:"verificationUriComplete" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) GoString() string { + return s.String() +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *StartDeviceAuthorizationOutput) SetDeviceCode(v string) *StartDeviceAuthorizationOutput { + s.DeviceCode = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *StartDeviceAuthorizationOutput) SetExpiresIn(v int64) *StartDeviceAuthorizationOutput { + s.ExpiresIn = &v + return s +} + +// SetInterval sets the Interval field's value. +func (s *StartDeviceAuthorizationOutput) SetInterval(v int64) *StartDeviceAuthorizationOutput { + s.Interval = &v + return s +} + +// SetUserCode sets the UserCode field's value. +func (s *StartDeviceAuthorizationOutput) SetUserCode(v string) *StartDeviceAuthorizationOutput { + s.UserCode = &v + return s +} + +// SetVerificationUri sets the VerificationUri field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUri(v string) *StartDeviceAuthorizationOutput { + s.VerificationUri = &v + return s +} + +// SetVerificationUriComplete sets the VerificationUriComplete field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUriComplete(v string) *StartDeviceAuthorizationOutput { + s.VerificationUriComplete = &v + return s +} + +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UnauthorizedClientException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedClientException(v protocol.ResponseMetadata) error { + return &UnauthorizedClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnauthorizedClientException) Code() string { + return "UnauthorizedClientException" +} + +// Message returns the exception's message. +func (s *UnauthorizedClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnauthorizedClientException) OrigErr() error { + return nil +} + +func (s *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnauthorizedClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnauthorizedClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the grant type in the request is not supported by the service. +type UnsupportedGrantTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnsupportedGrantTypeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnsupportedGrantTypeException) GoString() string { + return s.String() +} + +func newErrorUnsupportedGrantTypeException(v protocol.ResponseMetadata) error { + return &UnsupportedGrantTypeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedGrantTypeException) Code() string { + return "UnsupportedGrantTypeException" +} + +// Message returns the exception's message. +func (s *UnsupportedGrantTypeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedGrantTypeException) OrigErr() error { + return nil +} + +func (s *UnsupportedGrantTypeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedGrantTypeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *UnsupportedGrantTypeException) RequestID() string { + return s.RespMetadata.RequestID +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go new file mode 100644 index 000000000..8b5ee6019 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go @@ -0,0 +1,66 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ssooidc provides the client and types for making API +// requests to AWS SSO OIDC. +// +// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect +// (OIDC) is a web service that enables a client (such as AWS CLI or a native +// application) to register with IAM Identity Center. The service also enables +// the client to fetch the user’s access token upon successful authentication +// and authorization with IAM Identity Center. +// +// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces +// will continue to retain their original name for backward compatibility purposes. +// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). +// +// # Considerations for Using This Guide +// +// Before you begin using this guide, we recommend that you first review the +// following important information about how the IAM Identity Center OIDC service +// works. +// +// - The IAM Identity Center OIDC service currently implements only the portions +// of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628 +// (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single +// sign-on authentication with the AWS CLI. Support for other OIDC flows +// frequently needed for native applications, such as Authorization Code +// Flow (+ PKCE), will be addressed in future releases. +// +// - The service emits only OIDC access tokens, such that obtaining a new +// token (For example, token refresh) requires explicit user re-authentication. +// +// - The access tokens provided by this service grant access to all AWS account +// entitlements assigned to an IAM Identity Center user, not just a particular +// application. +// +// - The documentation in this guide does not describe the mechanism to convert +// the access token into AWS Auth (“sigv4”) credentials for use with +// IAM-protected AWS service endpoints. For more information, see GetRoleCredentials +// (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see What is IAM Identity +// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the IAM Identity Center User Guide. +// +// See https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10 for more information on this service. +// +// See ssooidc package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/ +// +// # Using the Client +// +// To contact AWS SSO OIDC with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. 
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS SSO OIDC client SSOOIDC for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go new file mode 100644 index 000000000..698377012 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -0,0 +1,107 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have sufficient access to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeAuthorizationPendingException for service response error code + // "AuthorizationPendingException". + // + // Indicates that a request to authorize a client with an access user session + // token is pending. + ErrCodeAuthorizationPendingException = "AuthorizationPendingException" + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // Indicates that the token issued by the service is expired and is no longer + // valid. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // Indicates that an error from the service occurred while trying to process + // a request. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeInvalidClientException for service response error code + // "InvalidClientException". + // + // Indicates that the clientId or clientSecret in the request is invalid. For + // example, this can occur when a client sends an incorrect clientId or an expired + // clientSecret. + ErrCodeInvalidClientException = "InvalidClientException" + + // ErrCodeInvalidClientMetadataException for service response error code + // "InvalidClientMetadataException". + // + // Indicates that the client information sent in the request during registration + // is invalid. + ErrCodeInvalidClientMetadataException = "InvalidClientMetadataException" + + // ErrCodeInvalidGrantException for service response error code + // "InvalidGrantException". + // + // Indicates that a request contains an invalid grant. This can occur if a client + // makes a CreateToken request with an invalid grant type. + ErrCodeInvalidGrantException = "InvalidGrantException" + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // Indicates that something is wrong with the input to the request. For example, + // a required parameter might be missing or out of range. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeInvalidScopeException for service response error code + // "InvalidScopeException". + // + // Indicates that the scope provided in the request is invalid. + ErrCodeInvalidScopeException = "InvalidScopeException" + + // ErrCodeSlowDownException for service response error code + // "SlowDownException". + // + // Indicates that the client is making the request too frequently and is more + // than the service can handle. 
+ ErrCodeSlowDownException = "SlowDownException" + + // ErrCodeUnauthorizedClientException for service response error code + // "UnauthorizedClientException". + // + // Indicates that the client is not currently authorized to make the request. + // This can happen when a clientId is not issued for a public client. + ErrCodeUnauthorizedClientException = "UnauthorizedClientException" + + // ErrCodeUnsupportedGrantTypeException for service response error code + // "UnsupportedGrantTypeException". + // + // Indicates that the grant type in the request is not supported by the service. + ErrCodeUnsupportedGrantTypeException = "UnsupportedGrantTypeException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, + "AuthorizationPendingException": newErrorAuthorizationPendingException, + "ExpiredTokenException": newErrorExpiredTokenException, + "InternalServerException": newErrorInternalServerException, + "InvalidClientException": newErrorInvalidClientException, + "InvalidClientMetadataException": newErrorInvalidClientMetadataException, + "InvalidGrantException": newErrorInvalidGrantException, + "InvalidRequestException": newErrorInvalidRequestException, + "InvalidScopeException": newErrorInvalidScopeException, + "SlowDownException": newErrorSlowDownException, + "UnauthorizedClientException": newErrorUnauthorizedClientException, + "UnsupportedGrantTypeException": newErrorUnsupportedGrantTypeException, +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go new file mode 100644 index 000000000..969f33c37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go @@ -0,0 +1,106 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// SSOOIDC provides the API operation methods for making requests to +// AWS SSO OIDC. See this package's package overview docs +// for details on the service. +// +// SSOOIDC methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type SSOOIDC struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "SSO OIDC" // Name of service. + EndpointsID = "oidc" // ID to lookup a service endpoint with. + ServiceID = "SSO OIDC" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the SSOOIDC client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create a SSOOIDC client from just a session. 
+// svc := ssooidc.New(mySession) +// +// // Create a SSOOIDC client with additional configuration +// svc := ssooidc.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "awsssooidc" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSOOIDC { + svc := &SSOOIDC{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSOOIDC operation and runs any +// custom request initialization. +func (c *SSOOIDC) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 63729d0a7..7ac6b93f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -85,9 +85,9 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// When you create a role, you create two policies: A role trust policy that -// specifies who can assume the role and a permissions policy that specifies -// what can be done with the role. You specify the trusted principal who is +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies +// what can be done with the role. You specify the trusted principal that is // allowed to assume the role in the role trust policy. // // To assume a role from a different account, your Amazon Web Services account @@ -96,9 +96,9 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // are allowed to delegate that access to users in the account. // // A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. 
+// that are delegated from the account administrator. The administrator must +// attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. // // To allow a user to assume a role in the same account, you can do either of // the following: @@ -517,10 +517,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // a user. You can also supply the user with a consistent identity throughout // the lifetime of an application. // -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito -// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the Amazon Web Services SDK for iOS Developer Guide. +// To learn more about Amazon Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// in Amazon Cognito Developer Guide. // // Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web // Services security credentials. Therefore, you can distribute an application @@ -984,11 +982,11 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // call the operation. // // No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// attaches a policy to your identity that explicitly denies access to the sts:GetCallerIdentity +// action, you can still perform this operation. Permissions are not required +// because the same information is returned when access is denied. To view an +// example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1063,18 +1061,26 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // GetFederationToken API operation for AWS Security Token Service. // // Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. +// key ID, a secret access key, and a security token) for a user. A typical +// use is in a proxy application that gets temporary security credentials on +// behalf of distributed applications inside a corporate network. 
+// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based application. // For a comparison of GetFederationToken with the other API operations that // produce temporary credentials, see Requesting Temporary Security Credentials // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // +// Although it is possible to call GetFederationToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user that +// you create for the purpose of a proxy application, we do not recommend it. +// For more information, see Safeguard your root user credentials and don't +// use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide. +// // You can create a mobile-based or browser-based app that can authenticate // users using a web identity provider like Login with Amazon, Facebook, Google, // or an OpenID Connect-compatible identity provider. In this case, we recommend @@ -1083,21 +1089,13 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. // -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. -// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// // # Session duration // // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default // session duration is 43,200 seconds (12 hours). Temporary credentials obtained -// by using the Amazon Web Services account root user credentials have a maximum -// duration of 3,600 seconds (1 hour). +// by using the root user credentials have a maximum duration of 3,600 seconds +// (1 hour). // // # Permissions // @@ -1267,12 +1265,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // or IAM user. The credentials consist of an access key ID, a secret access // key, and a security token. Typically, you use GetSessionToken if you want // to use MFA to protect programmatic calls to specific Amazon Web Services -// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would -// need to call GetSessionToken and submit an MFA code that is associated with -// their MFA device. Using the temporary security credentials that are returned -// from the call, IAM users can then make programmatic calls to API operations -// that require MFA authentication. If you do not supply a correct MFA code, -// then the API returns an access denied error. For a comparison of GetSessionToken +// API operations like Amazon EC2 StopInstances. 
+// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that the call returns, IAM users can then make programmatic calls to API +// operations that require MFA authentication. An incorrect MFA code causes +// the API to return an access denied error. For a comparison of GetSessionToken // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) @@ -1287,13 +1286,12 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // # Session Duration // // The GetSessionToken operation must be called by using the long-term Amazon -// Web Services security credentials of the Amazon Web Services account root -// user or an IAM user. Credentials that are created by IAM users are valid -// for the duration that you specify. This duration can range from 900 seconds -// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default -// of 43,200 seconds (12 hours). Credentials based on account credentials can -// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a -// default of 1 hour. +// Web Services security credentials of an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify. This duration can +// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 +// hours), with a default of 43,200 seconds (12 hours). Credentials based on +// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds +// (1 hour), with a default of 1 hour. // // # Permissions // @@ -1305,20 +1303,20 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // // - You cannot call any STS API except AssumeRole or GetCallerIdentity. // -// We recommend that you do not call GetSessionToken with Amazon Web Services -// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with Amazon Web Services. +// The credentials that GetSessionToken returns are based on permissions associated +// with the IAM user whose credentials were used to call the operation. The +// temporary credentials have the same permissions as the IAM user. // -// The credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using Amazon Web Services account root user -// credentials, the temporary credentials have root user permissions. Similarly, -// if GetSessionToken is called using the credentials of an IAM user, the temporary -// credentials have the same permissions as the IAM user. +// Although it is possible to call GetSessionToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user, we do +// not recommend it. If GetSessionToken is called using root user credentials, +// the temporary credentials have root user permissions. 
For more information, +// see Safeguard your root user credentials and don't use them for everyday +// tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide // // For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1900,8 +1898,12 @@ type AssumeRoleWithSAMLInput struct { // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the IAM User Guide. // + // SAMLAssertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's + // String and GoString methods. + // // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2036,7 +2038,7 @@ type AssumeRoleWithSAMLOutput struct { // IAM. // // The combination of NameQualifier and Subject can be used to uniquely identify - // a federated user. + // a user. // // The following pseudocode shows how the hash value is calculated: // @@ -2266,8 +2268,12 @@ type AssumeRoleWithWebIdentityInput struct { // the user who is using your application with a web identity provider before // the application makes an AssumeRoleWithWebIdentity call. // + // WebIdentityToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's + // String and GoString methods. + // // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2573,8 +2579,12 @@ type Credentials struct { // The secret access key that can be used to sign requests. // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Credentials's + // String and GoString methods. + // // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` + SecretAccessKey *string `type:"string" required:"true" sensitive:"true"` // The token that users must pass to the service API to use the temporary credentials. // @@ -2922,10 +2932,9 @@ type GetFederationTokenInput struct { // The duration, in seconds, that the session should last. Acceptable durations // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using Amazon Web Services account root user credentials are restricted to - // a maximum of 3,600 seconds (one hour). If the specified duration is longer - // than one hour, the session obtained by using root user credentials defaults - // to one hour. + // using root user credentials are restricted to a maximum of 3,600 seconds + // (one hour). 
If the specified duration is longer than one hour, the session + // obtained by using root user credentials defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The name of the federated user. The name is used as an identifier for the diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index c40f5a2a5..ea1d9eb0c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -4,10 +4,9 @@ // requests to AWS Security Token Service. // // Security Token Service (STS) enables you to request temporary, limited-privilege -// credentials for Identity and Access Management (IAM) users or for users that -// you authenticate (federated users). This guide provides descriptions of the -// STS API. For more information about using this service, see Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// credentials for users. This guide provides descriptions of the STS API. For +// more information about using this service, see Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. // diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index 7233c389b..9c1849b2c 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,12 @@ # Change Log +## [v1.99.0] - 2023-04-24 + +- #616 - @bentranter - Bump CI version for Go 1.20 +- #615 - @bentranter - Remove beta support for tokens API +- #604 - @dvigueras - Add support for "Validate a Container Registry Name" +- #613 - @ibilalkayy - updated the README file by showing up the build status icon + ## [v1.98.0] - 2023-03-09 - #608 - @anitgandhi - client: don't process body upon 204 response diff --git a/vendor/github.com/digitalocean/godo/README.md b/vendor/github.com/digitalocean/godo/README.md index 9a3ec2dad..4c9ee2d78 100644 --- a/vendor/github.com/digitalocean/godo/README.md +++ b/vendor/github.com/digitalocean/godo/README.md @@ -1,6 +1,6 @@ # Godo -[![Build Status](https://travis-ci.org/digitalocean/godo.svg)](https://travis-ci.org/digitalocean/godo) +[![GitHub Actions CI](https://github.com/digitalocean/godo/actions/workflows/ci.yml/badge.svg)](https://github.com/digitalocean/godo/actions/workflows/ci.yml) [![GoDoc](https://godoc.org/github.com/digitalocean/godo?status.svg)](https://godoc.org/github.com/digitalocean/godo) Godo is a Go client library for accessing the DigitalOcean V2 API. 
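Editor's note (not part of the vendored patch): the godo v1.99.0 changelog above references #604, "Add support for 'Validate a Container Registry Name'", and the corresponding `RegistryService.ValidateName` method and `RegistryValidateNameRequest` type appear in the registry.go diff below. The following is a minimal usage sketch only; the token-based client construction via `godo.NewFromToken` and the example registry name are assumptions for illustration, not content of this patch.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/digitalocean/godo"
)

func main() {
	// Assumes a DigitalOcean API token in DIGITALOCEAN_TOKEN (illustration only).
	client := godo.NewFromToken(os.Getenv("DIGITALOCEAN_TOKEN"))

	// ValidateName POSTs to /v2/registry/validate-name and returns only a
	// *godo.Response; per godo's usual error handling, a non-2xx response
	// surfaces as a non-nil error.
	resp, err := client.Registry.ValidateName(context.Background(), &godo.RegistryValidateNameRequest{
		Name: "example-registry",
	})
	if err != nil {
		log.Fatalf("registry name rejected or unavailable: %v", err)
	}
	fmt.Println("registry name is available, status:", resp.StatusCode)
}
```

A nil error indicates the name passed validation; no response body is decoded, matching the method's `(*Response, error)` signature in the diff below.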
diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 14fac268c..c48a5f788 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.98.0" + libraryVersion = "1.99.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" @@ -81,7 +81,6 @@ type Client struct { Storage StorageService StorageActions StorageActionsService Tags TagsService - Tokens TokensService UptimeChecks UptimeChecksService VPCs VPCsService @@ -252,7 +251,6 @@ func NewClient(httpClient *http.Client) *Client { c.Storage = &StorageServiceOp{client: c} c.StorageActions = &StorageActionsServiceOp{client: c} c.Tags = &TagsServiceOp{client: c} - c.Tokens = &TokensServiceOp{client: c} c.UptimeChecks = &UptimeChecksServiceOp{client: c} c.VPCs = &VPCsServiceOp{client: c} diff --git a/vendor/github.com/digitalocean/godo/registry.go b/vendor/github.com/digitalocean/godo/registry.go index 2fe9d2bd9..b0c243281 100644 --- a/vendor/github.com/digitalocean/godo/registry.go +++ b/vendor/github.com/digitalocean/godo/registry.go @@ -37,6 +37,7 @@ type RegistryService interface { GetOptions(context.Context) (*RegistryOptions, *Response, error) GetSubscription(context.Context) (*RegistrySubscription, *Response, error) UpdateSubscription(context.Context, *RegistrySubscriptionUpdateRequest) (*RegistrySubscription, *Response, error) + ValidateName(context.Context, *RegistryValidateNameRequest) (*Response, error) } var _ RegistryService = &RegistryServiceOp{} @@ -233,6 +234,12 @@ type RegistrySubscriptionUpdateRequest struct { TierSlug string `json:"tier_slug"` } +// RegistryValidateNameRequest represents a request to validate that a +// container registry name is available for use. +type RegistryValidateNameRequest struct { + Name string `json:"name"` +} + // Get retrieves the details of a Registry. func (svc *RegistryServiceOp) Get(ctx context.Context) (*Registry, *Response, error) { req, err := svc.client.NewRequest(ctx, http.MethodGet, registryPath, nil) @@ -589,3 +596,17 @@ func (svc *RegistryServiceOp) UpdateSubscription(ctx context.Context, request *R } return root.Subscription, resp, nil } + +// ValidateName validates that a container registry name is available for use. +func (svc *RegistryServiceOp) ValidateName(ctx context.Context, request *RegistryValidateNameRequest) (*Response, error) { + path := fmt.Sprintf("%s/validate-name", registryPath) + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, request) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/tokens.go b/vendor/github.com/digitalocean/godo/tokens.go deleted file mode 100644 index 13aa418df..000000000 --- a/vendor/github.com/digitalocean/godo/tokens.go +++ /dev/null @@ -1,228 +0,0 @@ -package godo - -import ( - "context" - "fmt" - "net/http" - "time" -) - -const ( - accessTokensBasePath = "v2/tokens" - tokenScopesBasePath = accessTokensBasePath + "/scopes" -) - -// TokensService is an interface for managing DigitalOcean API access tokens. -// It is not currently generally available. 
Follow the release notes for -// updates: https://docs.digitalocean.com/release-notes/api/ -type TokensService interface { - List(context.Context, *ListOptions) ([]Token, *Response, error) - Get(context.Context, int) (*Token, *Response, error) - Create(context.Context, *TokenCreateRequest) (*Token, *Response, error) - Update(context.Context, int, *TokenUpdateRequest) (*Token, *Response, error) - Revoke(context.Context, int) (*Response, error) - ListScopes(context.Context, *ListOptions) ([]TokenScope, *Response, error) - ListScopesByNamespace(context.Context, string, *ListOptions) ([]TokenScope, *Response, error) -} - -// TokensServiceOp handles communication with the tokens related methods of the -// DigitalOcean API. -type TokensServiceOp struct { - client *Client -} - -var _ TokensService = &TokensServiceOp{} - -// Token represents a DigitalOcean API token. -type Token struct { - ID int `json:"id"` - Name string `json:"name"` - Scopes []string `json:"scopes"` - ExpirySeconds *int `json:"expiry_seconds"` - CreatedAt time.Time `json:"created_at"` - LastUsedAt string `json:"last_used_at"` - - // AccessToken contains the actual Oauth token string. It is only included - // in the create response. - AccessToken string `json:"access_token,omitempty"` -} - -// tokenRoot represents a response from the DigitalOcean API -type tokenRoot struct { - Token *Token `json:"token"` -} - -type tokensRoot struct { - Tokens []Token `json:"tokens"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -// TokenCreateRequest represents a request to create a token. -type TokenCreateRequest struct { - Name string `json:"name"` - Scopes []string `json:"scopes"` - ExpirySeconds *int `json:"expiry_seconds,omitempty"` -} - -// TokenUpdateRequest represents a request to update a token. -type TokenUpdateRequest struct { - Name string `json:"name,omitempty"` - Scopes []string `json:"scopes,omitempty"` -} - -// TokenScope is a representation of a scope for the public API. -type TokenScope struct { - Name string `json:"name"` -} - -type tokenScopesRoot struct { - TokenScopes []TokenScope `json:"scopes"` - Links *Links `json:"links"` - Meta *Meta `json:"meta"` -} - -type tokenScopeNamespaceParam struct { - Namespace string `url:"namespace,omitempty"` -} - -// List all DigitalOcean API access tokens. -func (c TokensServiceOp) List(ctx context.Context, opt *ListOptions) ([]Token, *Response, error) { - path, err := addOptions(accessTokensBasePath, opt) - if err != nil { - return nil, nil, err - } - - req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(tokensRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.Tokens, resp, err -} - -// Get a specific DigitalOcean API access token. -func (c TokensServiceOp) Get(ctx context.Context, tokenID int) (*Token, *Response, error) { - path := fmt.Sprintf("%s/%d", accessTokensBasePath, tokenID) - req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(tokenRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Token, resp, err -} - -// Create a new DigitalOcean API access token. 
-func (c TokensServiceOp) Create(ctx context.Context, createRequest *TokenCreateRequest) (*Token, *Response, error) { - req, err := c.client.NewRequest(ctx, http.MethodPost, accessTokensBasePath, createRequest) - if err != nil { - return nil, nil, err - } - - root := new(tokenRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Token, resp, err -} - -// Update the name or scopes of a specific DigitalOcean API access token. -func (c TokensServiceOp) Update(ctx context.Context, tokenID int, updateRequest *TokenUpdateRequest) (*Token, *Response, error) { - path := fmt.Sprintf("%s/%d", accessTokensBasePath, tokenID) - req, err := c.client.NewRequest(ctx, http.MethodPatch, path, updateRequest) - if err != nil { - return nil, nil, err - } - - root := new(tokenRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - - return root.Token, resp, err -} - -// Revoke a specific DigitalOcean API access token. -func (c TokensServiceOp) Revoke(ctx context.Context, tokenID int) (*Response, error) { - path := fmt.Sprintf("%s/%d", accessTokensBasePath, tokenID) - req, err := c.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - - resp, err := c.client.Do(ctx, req, nil) - - return resp, err -} - -// ListScopes lists all available scopes that can be granted to a token. -func (c TokensServiceOp) ListScopes(ctx context.Context, opt *ListOptions) ([]TokenScope, *Response, error) { - path, err := addOptions(tokenScopesBasePath, opt) - if err != nil { - return nil, nil, err - } - - return listTokenScopes(ctx, c, path) -} - -// ListScopesByNamespace lists available scopes in a namespace that can be granted -// to a token (e.g. the namespace for the `droplet:read“ scope is `droplet`). -func (c TokensServiceOp) ListScopesByNamespace(ctx context.Context, namespace string, opt *ListOptions) ([]TokenScope, *Response, error) { - path, err := addOptions(tokenScopesBasePath, opt) - if err != nil { - return nil, nil, err - } - - namespaceOpt := tokenScopeNamespaceParam{ - Namespace: namespace, - } - - path, err = addOptions(path, namespaceOpt) - if err != nil { - return nil, nil, err - } - - return listTokenScopes(ctx, c, path) -} - -func listTokenScopes(ctx context.Context, c TokensServiceOp, path string) ([]TokenScope, *Response, error) { - req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, nil, err - } - - root := new(tokenScopesRoot) - resp, err := c.client.Do(ctx, req, root) - if err != nil { - return nil, resp, err - } - if l := root.Links; l != nil { - resp.Links = l - } - if m := root.Meta; m != nil { - resp.Meta = m - } - - return root.TokenScopes, resp, err -} diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 02a73ccfd..352018e70 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,10 @@ # Change history of go-restful +## [v3.10.2] - 2023-03-09 + +- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 + see comment in Readme how to customize this behaviour. 
+ ## [v3.10.1] - 2022-11-19 - fix broken 3.10.0 by using path package for joining paths diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 0625359dc..85da90128 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -96,6 +96,10 @@ There are several hooks to customize the behavior of the go-restful package. - Compression - Encoders for other serializers - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` +- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path + - versions >= 3.10.1 has set the value to `PathJoinStrategy` that fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services not to work correctly anymore. + - versions <= 3.9 had the behaviour that can be restored in newer versions by setting the value to `TrimSlashStrategy`. + - you can set value to a custom implementation (must implement MergePathStrategyFunc) ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 830ebf148..827f471de 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -353,8 +353,28 @@ func (b *RouteBuilder) Build() Route { return route } -func concatPath(path1, path2 string) string { - return path.Join(path1, path2) +type MergePathStrategyFunc func(rootPath, routePath string) string + +var ( + // behavior >= 3.10 + PathJoinStrategy = func(rootPath, routePath string) string { + return path.Join(rootPath, routePath) + } + + // behavior <= 3.9 + TrimSlashStrategy = func(rootPath, routePath string) string { + return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } + + // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices. + // The value is set to PathJoinStrategy + // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227] + MergePathStrategy = PathJoinStrategy +) + +// merge two paths using the current (package global) merge path strategy. 
+func concatPath(rootPath, routePath string) string { + return MergePathStrategy(rootPath, routePath) } var anonymousFuncCount int32 diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go index 8578ce93f..076038fc0 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go @@ -8,6 +8,7 @@ package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" @@ -146,7 +147,7 @@ func (x *HealthStatusSet) GetStatuses() []HealthStatus { return nil } -// [#next-free-field: 25] +// [#next-free-field: 26] type HealthCheck struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -233,9 +234,19 @@ type HealthCheck struct { // // The default value for "healthy edge interval" is the same as the default interval. HealthyEdgeInterval *duration.Duration `protobuf:"bytes,16,opt,name=healthy_edge_interval,json=healthyEdgeInterval,proto3" json:"healthy_edge_interval,omitempty"` + // .. attention:: + // This field is deprecated in favor of the extension + // :ref:`event_logger ` and + // :ref:`event_log_path ` + // in the file sink extension. + // // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. + // + // Deprecated: Do not use. EventLogPath string `protobuf:"bytes,17,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"` + // A list of event log sinks to process the health check event. + // [#extension-category: envoy.health_check.event_sinks] + EventLogger []*TypedExtensionConfig `protobuf:"bytes,25,rep,name=event_logger,json=eventLogger,proto3" json:"event_logger,omitempty"` // [#not-implemented-hide:] // The gRPC service for the health check event service. // If empty, health check events won't be sent to a remote endpoint. @@ -444,6 +455,7 @@ func (x *HealthCheck) GetHealthyEdgeInterval() *duration.Duration { return nil } +// Deprecated: Do not use. 
func (x *HealthCheck) GetEventLogPath() string { if x != nil { return x.EventLogPath @@ -451,6 +463,13 @@ func (x *HealthCheck) GetEventLogPath() string { return "" } +func (x *HealthCheck) GetEventLogger() []*TypedExtensionConfig { + if x != nil { + return x.EventLogger + } + return nil +} + func (x *HealthCheck) GetEventService() *EventServiceConfig { if x != nil { return x.EventService @@ -1114,288 +1133,298 @@ var file_envoy_config_core_v3_health_check_proto_rawDesc = []byte{ 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, - 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, - 0x74, 0x12, 0x4d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, - 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, - 0x22, 0x91, 0x1e, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x12, 0x3f, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, - 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x12, 0x41, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, - 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x12, 0x40, 0x0a, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0f, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, 0x12, 0x4d, 0x0a, 0x08, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x22, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 
0x22, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0xed, 0x1e, 0x0a, 0x0b, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, - 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, + 0x01, 0x2a, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x41, 0x0a, 0x08, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, + 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, + 0x40, 0x0a, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, + 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, + 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, + 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, + 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x57, 0x0a, + 0x13, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x37, 0x0a, 
0x08, 0x61, + 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x61, 0x6c, 0x74, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x10, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x75, 0x73, + 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x11, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x68, 0x74, 0x74, + 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5c, 0x0a, 0x10, + 0x74, 0x63, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5f, 0x0a, 0x11, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x65, 0x0a, 0x13, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, + 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x12, 0x53, 0x0a, 0x13, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x76, 
0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x50, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x12, 0x57, 0x0a, 0x13, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, - 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x11, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, - 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, - 0x12, 0x37, 0x0a, 0x08, 0x61, 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x07, 0x61, 0x6c, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x10, 0x72, 0x65, 0x75, - 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0f, 0x72, 0x65, 0x75, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x5f, 0x0a, 0x11, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, - 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, - 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, - 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, - 0x0e, 0x74, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, - 0x5f, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0b, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, - 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, - 0x0f, 0x67, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x12, 0x65, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x48, 0x00, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x53, 0x0a, 0x13, 0x6e, 0x6f, 0x5f, 0x74, 0x72, - 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x6e, 0x6f, 0x54, 0x72, 0x61, - 0x66, 0x66, 0x69, 0x63, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x62, 0x0a, 0x1b, - 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x18, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x18, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, - 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x12, 0x52, 0x0a, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, + 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x62, 0x0a, 0x1b, 0x6e, 0x6f, 0x5f, 0x74, 0x72, + 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, - 0x00, 0x52, 0x11, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x12, 0x5b, 0x0a, 0x17, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 
0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x15, 0x75, 0x6e, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x12, 0x57, 0x0a, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x00, 0x52, 0x18, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x12, 0x75, + 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x75, 0x6e, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, + 0x5b, 0x0a, 0x17, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, + 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, - 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, - 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x46, 0x0a, 0x20, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x61, 0x6c, 0x77, 0x61, 0x79, - 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, - 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, 0x0a, 0x1f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, - 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 
- 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x1c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, - 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, - 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x80, 0x01, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x12, 0x1d, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x04, 0x74, 0x65, 0x78, - 0x74, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, - 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x0e, 0x0a, 0x07, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xcc, 0x07, 0x0a, 0x0f, 0x48, 0x74, - 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1f, 0x0a, - 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, - 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x21, - 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, - 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, - 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, - 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x57, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0e, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x67, - 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x15, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, + 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x57, 0x0a, 0x15, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 
0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, + 0x52, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x31, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, + 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x18, + 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, - 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, - 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, + 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x1c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x4d, + 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, 0x0a, + 0x1f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x1c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x80, 0x01, + 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1d, 0x0a, 0x04, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x48, 0x00, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x69, 0x6e, 0x61, + 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61, + 0x72, 0x79, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x42, 0x0e, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x1a, 0xcc, 0x07, 0x0a, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, + 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x02, 0xc8, + 0x01, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, + 0x76, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x57, 0x0a, 0x14, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, + 0x00, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, + 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, + 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, + 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65, + 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, - 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x12, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x11, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x63, 0x6f, 0x64, - 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x56, 0x0a, 0x14, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, - 
0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, - 0x04, 0x10, 0x01, 0x20, 0x06, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x3a, 0x34, 0x9a, - 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, - 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x09, 0x75, - 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x1a, 0xc9, 0x01, 0x0a, 0x0e, 0x54, 0x63, 0x70, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3d, 0x0a, 0x04, 0x73, - 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, - 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x72, 0x65, 0x74, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, + 0x11, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x0f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x56, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x06, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x3a, - 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, - 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x1a, 0xf4, 0x01, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, - 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, - 0x07, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xc0, 0x01, 0x0a, 0x11, 0x43, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x06, 0x52, 0x06, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, + 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x09, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x1a, + 0xc9, 0x01, 0x0a, 0x0e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 
0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, + 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, + 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x0d, - 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, - 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x64, 0x0a, 0x0a, 0x54, - 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, - 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x5b, 0x0a, 0x10, 0x52, + 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x15, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, - 0x08, 0x0a, 0x10, 0x0b, 0x2a, 0x60, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, - 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, - 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, - 0x41, 0x44, 0x45, 0x44, 0x10, 0x05, 0x42, 0x84, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, - 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xf4, 0x01, 0x0a, 0x0f, 0x47, 0x72, 0x70, + 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x10, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, + 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, + 0xc0, 0x01, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, + 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x36, 0x9a, + 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 
0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x64, 0x0a, 0x0a, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, + 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, + 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x15, + 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x2a, 0x60, 0x0a, 0x0c, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, + 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, + 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, + 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x04, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x05, 0x42, 0x84, 0x01, + 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1426,15 +1455,16 @@ var file_envoy_config_core_v3_health_check_proto_goTypes = []interface{}{ (*duration.Duration)(nil), // 10: google.protobuf.Duration (*wrappers.UInt32Value)(nil), // 11: google.protobuf.UInt32Value (*wrappers.BoolValue)(nil), // 12: google.protobuf.BoolValue - (*EventServiceConfig)(nil), // 13: envoy.config.core.v3.EventServiceConfig - (*_struct.Struct)(nil), // 14: google.protobuf.Struct - (*wrappers.UInt64Value)(nil), // 15: google.protobuf.UInt64Value - (*HeaderValueOption)(nil), // 16: envoy.config.core.v3.HeaderValueOption - (*v3.Int64Range)(nil), // 17: envoy.type.v3.Int64Range - (v3.CodecClientType)(0), // 18: envoy.type.v3.CodecClientType - (*v31.StringMatcher)(nil), // 19: envoy.type.matcher.v3.StringMatcher - (RequestMethod)(0), // 20: envoy.config.core.v3.RequestMethod - (*any1.Any)(nil), // 21: google.protobuf.Any + (*TypedExtensionConfig)(nil), // 13: envoy.config.core.v3.TypedExtensionConfig + (*EventServiceConfig)(nil), // 14: 
envoy.config.core.v3.EventServiceConfig + (*_struct.Struct)(nil), // 15: google.protobuf.Struct + (*wrappers.UInt64Value)(nil), // 16: google.protobuf.UInt64Value + (*HeaderValueOption)(nil), // 17: envoy.config.core.v3.HeaderValueOption + (*v3.Int64Range)(nil), // 18: envoy.type.v3.Int64Range + (v3.CodecClientType)(0), // 19: envoy.type.v3.CodecClientType + (*v31.StringMatcher)(nil), // 20: envoy.type.matcher.v3.StringMatcher + (RequestMethod)(0), // 21: envoy.config.core.v3.RequestMethod + (*any1.Any)(nil), // 22: google.protobuf.Any } var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{ 0, // 0: envoy.config.core.v3.HealthStatusSet.statuses:type_name -> envoy.config.core.v3.HealthStatus @@ -1455,27 +1485,28 @@ var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{ 10, // 15: envoy.config.core.v3.HealthCheck.unhealthy_interval:type_name -> google.protobuf.Duration 10, // 16: envoy.config.core.v3.HealthCheck.unhealthy_edge_interval:type_name -> google.protobuf.Duration 10, // 17: envoy.config.core.v3.HealthCheck.healthy_edge_interval:type_name -> google.protobuf.Duration - 13, // 18: envoy.config.core.v3.HealthCheck.event_service:type_name -> envoy.config.core.v3.EventServiceConfig - 9, // 19: envoy.config.core.v3.HealthCheck.tls_options:type_name -> envoy.config.core.v3.HealthCheck.TlsOptions - 14, // 20: envoy.config.core.v3.HealthCheck.transport_socket_match_criteria:type_name -> google.protobuf.Struct - 3, // 21: envoy.config.core.v3.HealthCheck.HttpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload - 3, // 22: envoy.config.core.v3.HealthCheck.HttpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload - 15, // 23: envoy.config.core.v3.HealthCheck.HttpHealthCheck.response_buffer_size:type_name -> google.protobuf.UInt64Value - 16, // 24: envoy.config.core.v3.HealthCheck.HttpHealthCheck.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 17, // 25: envoy.config.core.v3.HealthCheck.HttpHealthCheck.expected_statuses:type_name -> envoy.type.v3.Int64Range - 17, // 26: envoy.config.core.v3.HealthCheck.HttpHealthCheck.retriable_statuses:type_name -> envoy.type.v3.Int64Range - 18, // 27: envoy.config.core.v3.HealthCheck.HttpHealthCheck.codec_client_type:type_name -> envoy.type.v3.CodecClientType - 19, // 28: envoy.config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher:type_name -> envoy.type.matcher.v3.StringMatcher - 20, // 29: envoy.config.core.v3.HealthCheck.HttpHealthCheck.method:type_name -> envoy.config.core.v3.RequestMethod - 3, // 30: envoy.config.core.v3.HealthCheck.TcpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload - 3, // 31: envoy.config.core.v3.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload - 16, // 32: envoy.config.core.v3.HealthCheck.GrpcHealthCheck.initial_metadata:type_name -> envoy.config.core.v3.HeaderValueOption - 21, // 33: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any - 34, // [34:34] is the sub-list for method output_type - 34, // [34:34] is the sub-list for method input_type - 34, // [34:34] is the sub-list for extension type_name - 34, // [34:34] is the sub-list for extension extendee - 0, // [0:34] is the sub-list for field type_name + 13, // 18: envoy.config.core.v3.HealthCheck.event_logger:type_name -> envoy.config.core.v3.TypedExtensionConfig + 14, // 19: envoy.config.core.v3.HealthCheck.event_service:type_name -> envoy.config.core.v3.EventServiceConfig 
+ 9, // 20: envoy.config.core.v3.HealthCheck.tls_options:type_name -> envoy.config.core.v3.HealthCheck.TlsOptions + 15, // 21: envoy.config.core.v3.HealthCheck.transport_socket_match_criteria:type_name -> google.protobuf.Struct + 3, // 22: envoy.config.core.v3.HealthCheck.HttpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload + 3, // 23: envoy.config.core.v3.HealthCheck.HttpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload + 16, // 24: envoy.config.core.v3.HealthCheck.HttpHealthCheck.response_buffer_size:type_name -> google.protobuf.UInt64Value + 17, // 25: envoy.config.core.v3.HealthCheck.HttpHealthCheck.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 18, // 26: envoy.config.core.v3.HealthCheck.HttpHealthCheck.expected_statuses:type_name -> envoy.type.v3.Int64Range + 18, // 27: envoy.config.core.v3.HealthCheck.HttpHealthCheck.retriable_statuses:type_name -> envoy.type.v3.Int64Range + 19, // 28: envoy.config.core.v3.HealthCheck.HttpHealthCheck.codec_client_type:type_name -> envoy.type.v3.CodecClientType + 20, // 29: envoy.config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher:type_name -> envoy.type.matcher.v3.StringMatcher + 21, // 30: envoy.config.core.v3.HealthCheck.HttpHealthCheck.method:type_name -> envoy.config.core.v3.RequestMethod + 3, // 31: envoy.config.core.v3.HealthCheck.TcpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload + 3, // 32: envoy.config.core.v3.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload + 17, // 33: envoy.config.core.v3.HealthCheck.GrpcHealthCheck.initial_metadata:type_name -> envoy.config.core.v3.HeaderValueOption + 22, // 34: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any + 35, // [35:35] is the sub-list for method output_type + 35, // [35:35] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 35, // [35:35] is the sub-list for extension extendee + 0, // [0:35] is the sub-list for field type_name } func init() { file_envoy_config_core_v3_health_check_proto_init() } @@ -1485,6 +1516,7 @@ func file_envoy_config_core_v3_health_check_proto_init() { } file_envoy_config_core_v3_base_proto_init() file_envoy_config_core_v3_event_service_config_proto_init() + file_envoy_config_core_v3_extension_proto_init() if !protoimpl.UnsafeEnabled { file_envoy_config_core_v3_health_check_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HealthStatusSet); i { diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go index 5abf42ff3..837a3e1fa 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go @@ -609,6 +609,40 @@ func (m *HealthCheck) validate(all bool) error { // no validation rules for EventLogPath + for idx, item := range m.GetEventLogger() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: fmt.Sprintf("EventLogger[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err 
:= v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: fmt.Sprintf("EventLogger[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: fmt.Sprintf("EventLogger[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + if all { switch v := interface{}(m.GetEventService()).(type) { case interface{ ValidateAll() error }: diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/NOTICE b/vendor/github.com/envoyproxy/protoc-gen-validate/NOTICE deleted file mode 100644 index 60884a059..000000000 --- a/vendor/github.com/envoyproxy/protoc-gen-validate/NOTICE +++ /dev/null @@ -1,4 +0,0 @@ -protoc-gen-validate -Copyright 2019 Envoy Project Authors - -Licensed under Apache License 2.0. See LICENSE for terms. diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go new file mode 100644 index 000000000..be01c558e --- /dev/null +++ b/vendor/github.com/fatih/color/color_windows.go @@ -0,0 +1,19 @@ +package color + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func init() { + // Opt-in for ansi color support for current process. + // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences + var outMode uint32 + out := windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(out, &outMode); err != nil { + return + } + outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + _ = windows.SetConsoleMode(out, outMode) +} diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index 77f1f92c5..c13f3435f 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -112,7 +112,7 @@ func flattenComposite(errs *CompositeError) *CompositeError { for _, er := range errs.Errors { switch e := er.(type) { case *CompositeError: - if len(e.Errors) > 0 { + if e != nil && len(e.Errors) > 0 { flat := flattenComposite(e) if len(flat.Errors) > 0 { res = append(res, flat.Errors...) 
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index 7df9853de..de60dc7dd 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -26,6 +26,7 @@ package jsonpointer import ( + "encoding/json" "errors" "fmt" "reflect" @@ -40,6 +41,7 @@ const ( pointerSeparator = `/` invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator + notFound = `Can't find the pointer in the document` ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() @@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process type JSONPointable interface { - JSONLookup(string) (interface{}, error) + JSONLookup(string) (any, error) } // JSONSetable is an interface for structs to implement when they need to customize the // json pointer process type JSONSetable interface { - JSONSet(string, interface{}) error + JSONSet(string, any) error } // New creates a new json pointer for the given string @@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error { err = errors.New(invalidStart) } else { referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) } } @@ -91,26 +91,26 @@ func (p *Pointer) parse(jsonPointerString string) error { } // Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { +func (p *Pointer) Get(document any) (any, reflect.Kind, error) { return p.get(document, swag.DefaultJSONNameProvider) } // Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { +func (p *Pointer) Set(document any, value any) (any, error) { return document, p.set(document, value, swag.DefaultJSONNameProvider) } // GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { +func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) } // SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { +func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() @@ -159,7 +159,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam } -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { +func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) if ns, ok := 
node.(JSONSetable); ok { // pointer impl @@ -210,7 +210,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw } -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { if nameProvider == nil { nameProvider = swag.DefaultJSONNameProvider @@ -241,7 +241,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf return node, kind, nil } -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { +func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { @@ -363,6 +363,127 @@ func (p *Pointer) String() string { return pointerString } +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return offset, nil +} + +func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { + for dec.More() { + offset := dec.InputOffset() + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err := drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err := drainSingle(dec); err != nil { + return 0, err + } + } + case string: + if tk == decodedToken { + return offset, nil + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return 0, fmt.Errorf("token reference %q not found", decodedToken) +} + +func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { + idx, err := strconv.Atoi(decodedToken) + if err != nil { + return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) + } + var i int + for i = 0; i < idx && dec.More(); i++ { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err := drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err := drainSingle(dec); err != nil { + return 0, err + } + } + } + } + if !dec.More() { + return 0, fmt.Errorf("token reference %q not found", decodedToken) + } + return dec.InputOffset(), nil +} + +// drainSingle drains a single level of object or array. +// The decoder has to guarantee the begining delim (i.e. '{' or '[') has been consumed. 
+func drainSingle(dec *json.Decoder) error { + for dec.More() { + tk, err := dec.Token() + if err != nil { + return err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err := drainSingle(dec); err != nil { + return err + } + case '[': + if err := drainSingle(dec); err != nil { + return err + } + } + } + } + // Consumes the ending delim + if _, err := dec.Token(); err != nil { + return err + } + return nil +} + // Specific JSON pointer encoding here // ~0 => ~ // ~1 => / diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go index c458b49b2..582f0fd4c 100644 --- a/vendor/github.com/go-openapi/spec/info.go +++ b/vendor/github.com/go-openapi/spec/info.go @@ -16,6 +16,7 @@ package spec import ( "encoding/json" + "strconv" "strings" "github.com/go-openapi/jsonpointer" @@ -40,6 +41,24 @@ func (e Extensions) GetString(key string) (string, bool) { return "", false } +// GetInt gets a int value from the extensions +func (e Extensions) GetInt(key string) (int, bool) { + realKey := strings.ToLower(key) + + if v, ok := e.GetString(realKey); ok { + if r, err := strconv.Atoi(v); err == nil { + return r, true + } + } + + if v, ok := e[realKey]; ok { + if r, rOk := v.(float64); rOk { + return int(r), true + } + } + return -1, false +} + // GetBool gets a string value from the extensions func (e Extensions) GetBool(key string) (bool, bool) { if v, ok := e[strings.ToLower(key)]; ok { diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go index 2af13787a..91d2435f0 100644 --- a/vendor/github.com/go-openapi/spec/properties.go +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -42,8 +42,8 @@ func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { func (items OrderSchemaItems) Len() int { return len(items) } func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } func (items OrderSchemaItems) Less(i, j int) (ret bool) { - ii, oki := items[i].Extensions.GetString("x-order") - ij, okj := items[j].Extensions.GetString("x-order") + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") if oki { if okj { defer func() { @@ -56,7 +56,7 @@ func (items OrderSchemaItems) Less(i, j int) (ret bool) { ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() } }() - return reflect.ValueOf(ii).Int() < reflect.ValueOf(ij).Int() + return ii < ij } return true } else if okj { diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go index 4efb6f868..16c3076fe 100644 --- a/vendor/github.com/go-openapi/spec/responses.go +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -19,6 +19,7 @@ import ( "fmt" "reflect" "strconv" + "strings" "github.com/go-openapi/swag" ) @@ -62,6 +63,7 @@ func (r *Responses) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } @@ -107,20 +109,31 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } + if v, ok := res["default"]; ok { - r.Default = &v + var defaultRes Response + if err := json.Unmarshal(v, &defaultRes); err != nil { + 
return err + } + r.Default = &defaultRes delete(res, "default") } for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} + if !strings.HasPrefix(k, "x-") { + var statusCodeResp Response + if err := json.Unmarshal(v, &statusCodeResp); err != nil { + return err + } + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = statusCodeResp } - r.StatusCodeResponses[nk] = v } } return nil diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index f78ab684a..d971fbe34 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -341,12 +341,21 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values. func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + // check for things that have an IsZero method instead if vv, ok := data.(zeroable); ok { return vv.IsZero() } + // continue with slightly more complex reflection - v := reflect.ValueOf(data) switch v.Kind() { case reflect.String: return v.Len() == 0 @@ -358,14 +367,13 @@ func IsZero(data interface{}) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() case reflect.Struct, reflect.Array: return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) case reflect.Invalid: return true + default: + return false } - return false } // AddInitialisms add additional initialisms diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go new file mode 100644 index 000000000..e8134ec8b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1435 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) +const maxSecondsInDuration = 315576000000 + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. 
+// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type isWkt interface { + XXX_WellKnownType() string +} + +var ( + wktType = reflect.TypeOf((*isWkt)(nil)).Elem() + messageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + s, ns := s.Field(0).Int(), s.Field(1).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". 
+ f := "%d.%09d" + if ns < 0 { + ns = -ns + if s == 0 { + f = "-%d.%09d" + } + } + x := fmt.Sprintf(f, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + //this is not a protobuf field + if valueField.Tag.Get("protobuf") == "" && valueField.Tag.Get("protobuf_oneof") == "" { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + // If the map value is a cast type, it may not implement proto.Message, therefore + // allow the struct tag to declare the underlying message type. Change the property + // of the child types, use CustomType as a passer. CastType currently property is + // not used in json encoding. 
+ if value.Kind() == reflect.Map { + if tag := valueField.Tag.Get("protobuf"); tag != "" { + for _, v := range strings.Split(tag, ",") { + if !strings.HasPrefix(v, "castvaluetype=") { + continue + } + v = strings.TrimPrefix(v, "castvaluetype=") + prop.MapValProp.CustomType = v + break + } + } + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(isWkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. 
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if v.Type().Implements(wktType) { + wkt := v.Interface().(isWkt) + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + if t, ok := v.Interface().(time.Time); ok { + ts, err := types.TimestampProto(t) + if err != nil { + return err + } + return m.marshalValue(out, prop, reflect.ValueOf(ts), indent) + } + + if d, ok := v.Interface().(time.Duration); ok { + dur := types.DurationProto(d) + return m.marshalValue(out, prop, reflect.ValueOf(dur), indent) + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + enumStr = string(data) + enumStr, err = strconv.Unquote(enumStr) + if err != nil { + return err + } + } + + isKnownEnum := enumStr != valStr + + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. 
+ if v.Kind() == reflect.Struct { + i := v + if v.CanAddr() { + i = v.Addr() + } else { + i = reflect.New(v.Type()) + i.Elem().Set(v) + } + iface := i.Interface() + if iface == nil { + out.write(`null`) + return out.err + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + out.write(string(data)) + return nil + } + + pm, ok := iface.(proto.Message) + if !ok { + if prop.CustomType == "" { + return fmt.Errorf("%v does not implement proto.Message", v.Type()) + } + t := proto.MessageType(prop.CustomType) + if t == nil || !i.Type().ConvertibleTo(t) { + return fmt.Errorf("%v declared custom type %s but it is not convertible to %v", v.Type(), prop.CustomType, t) + } + pm = i.Convert(t).Interface().(proto.Message) + } + return m.marshalObject(out, pm, indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. 
+func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&types.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(isWkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(isWkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, uerr := json.Marshal(jsonFields) + if uerr != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", uerr) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + target.Field(0).Set(reflect.ValueOf(map[string]*types.Value{})) + for k, jv := range m { + pv := &types.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*types.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&types.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_NumberValue{NumberValue: v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_StringValue{StringValue: v})) + } else if v, err := 
strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_BoolValue{BoolValue: v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &types.ListValue{} + target.Field(0).Set(reflect.ValueOf(&types.Value_ListValue{ListValue: lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &types.Struct{} + target.Field(0).Set(reflect.ValueOf(&types.Value_StructValue{StructValue: sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + if t, ok := target.Addr().Interface().(*time.Time); ok { + ts := &types.Timestamp{} + if err := u.unmarshalValue(reflect.ValueOf(ts).Elem(), inputValue, prop); err != nil { + return err + } + tt, err := types.TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil + } + + if d, ok := target.Addr().Interface().(*time.Duration); ok { + dur := &types.Duration{} + if err := u.unmarshalValue(reflect.ValueOf(dur).Elem(), inputValue, prop); err != nil { + return err + } + dd, err := types.DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + if prop != nil && len(prop.CustomType) > 0 && target.CanAddr() { + if m, ok := target.Addr().Interface().(interface { + UnmarshalJSON([]byte) error + }); ok { + return json.Unmarshal(inputValue, m) + } + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. 
+ var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays + if targetType.Kind() == reflect.Slice { + if targetType.Elem().Kind() == reflect.Uint8 { + outRef := reflect.New(targetType) + outVal := outRef.Interface() + //CustomType with underlying type []byte + if _, ok := outVal.(interface { + UnmarshalJSON([]byte) error + }); ok { + if err := json.Unmarshal(inputValue, outVal); err != nil { + return err + } + target.Set(outRef.Elem()) + return nil + } + // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling + // strings into aliased []byte types. + // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 + // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a + var out []byte + if err := json.Unmarshal(inputValue, &out); err != nil { + return err + } + target.SetBytes(out) + return nil + } + + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. 
+ var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + if !k.Type().AssignableTo(targetType.Key()) { + k = k.Convert(targetType.Key()) + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. 
+type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(isWkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. 
+ for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if v.Type().Implements(messageType) { + return checkRequiredFields(v.Interface().(proto.Message)) + } + return nil +} diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index c8a1beb8a..182c926b9 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -258,10 +258,10 @@ func (p *Profile) postDecode() error { // If this a main linux kernel mapping with a relocation symbol suffix // ("[kernel.kallsyms]_text"), extract said suffix. // It is fairly hacky to handle at this level, but the alternatives appear even worse. - if strings.HasPrefix(m.File, "[kernel.kallsyms]") { - m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "") + const prefix = "[kernel.kallsyms]" + if strings.HasPrefix(m.File, prefix) { + m.KernelRelocationSymbol = m.File[len(prefix):] } - } functions := make(map[uint64]*Function, len(p.Function)) diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 91d60a809..ef508417b 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.11.0" + "v2": "2.12.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index e17b196f6..ae7114947 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,13 @@ # Changelog +## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) + + +### Features + +* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) ([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68)) +* **v2:** add BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1)) + ## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go new file mode 100644 index 000000000..af15fb582 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -0,0 +1,74 @@ +// Copyright 2023, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package callctx provides helpers for storing and retrieving values out of +// [context.Context]. These values are used by our client libraries in various +// ways across the stack. +package callctx + +import ( + "context" + "fmt" +) + +const ( + headerKey = contextKey("header") +) + +// contextKey is a private type used to store/retrieve context values. +type contextKey string + +// HeadersFromContext retrieves headers set from [SetHeaders]. These headers +// can then be cast to http.Header or metadata.MD to send along on requests. +func HeadersFromContext(ctx context.Context) map[string][]string { + m, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + return nil + } + return m +} + +// SetHeaders stores key value pairs in the returned context that can later +// be retrieved by [HeadersFromContext]. Values stored in this manner will +// automatically be retrieved by client libraries and sent as outgoing headers +// on all requests. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. 
+func SetHeaders(ctx context.Context, keyvals ...string) context.Context { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + h, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + h = make(map[string][]string) + } + for i := 0; i < len(keyvals); i = i + 2 { + h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) + } + return context.WithValue(ctx, headerKey, h) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 6488461f4..453fab7ec 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -31,9 +31,15 @@ package gax import ( "bytes" + "context" + "fmt" + "net/http" "runtime" "strings" "unicode" + + "github.com/googleapis/gax-go/v2/callctx" + "google.golang.org/grpc/metadata" ) var ( @@ -117,3 +123,46 @@ func XGoogHeader(keyval ...string) string { } return buf.String()[1:] } + +// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries +// only. +// +// InsertMetadataIntoOutgoingContext returns a new context that merges the +// provided keyvals metadata pairs with any existing metadata/headers in the +// provided context. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context { + return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) +} + +// BuildHeaders is for use by the Google Cloud Libraries only. +// +// BuildHeaders returns a new http.Header that merges the provided +// keyvals header pairs with any existing metadata/headers in the provided +// context. keyvals should have a corresponding value for every key provided. +// If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func BuildHeaders(ctx context.Context, keyvals ...string) http.Header { + return http.Header(insertMetadata(ctx, keyvals...)) +} + +func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + out, ok := metadata.FromOutgoingContext(ctx) + if !ok { + out = metadata.MD(make(map[string][]string)) + } + headers := callctx.HeadersFromContext(ctx) + for k, v := range headers { + out[k] = append(out[k], v...) + } + for i := 0; i < len(keyvals); i = i + 2 { + out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + } + return out +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 374dcdb11..7425b5ffb 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "2.11.0" +const Version = "2.12.0" diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md index 9e2567b98..e19d5af51 100644 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md @@ -1,3 +1,39 @@ +## v1.5.0 (2023-06-21) + +New features and improvements: + +* [GH-2634](https://github.com/gophercloud/gophercloud/pull/2634) baremetal: update inspection inventory with recent additions +* [GH-2635](https://github.com/gophercloud/gophercloud/pull/2635) [manila]: Add Share Replicas support +* [GH-2637](https://github.com/gophercloud/gophercloud/pull/2637) [FWaaS_v2]: Add FWaaS_V2 workflow and enable tests +* [GH-2639](https://github.com/gophercloud/gophercloud/pull/2639) Implement errors.Unwrap() on unexpected status code errors +* [GH-2648](https://github.com/gophercloud/gophercloud/pull/2648) [manila]: implement share transfer API + + +## v1.4.0 (2023-05-25) + +New features and improvements: + +* [GH-2465](https://github.com/gophercloud/gophercloud/pull/2465) keystone: add v3 limits update operation +* [GH-2596](https://github.com/gophercloud/gophercloud/pull/2596) keystone: add v3 limits get operation +* [GH-2618](https://github.com/gophercloud/gophercloud/pull/2618) keystone: add v3 limits delete operation +* [GH-2616](https://github.com/gophercloud/gophercloud/pull/2616) Add CRUD support for register limit APIs +* [GH-2610](https://github.com/gophercloud/gophercloud/pull/2610) Add PUT/HEAD/DELETE for identity/v3/OS-INHERIT +* [GH-2597](https://github.com/gophercloud/gophercloud/pull/2597) Add validation and optimise objects.BulkDelete +* [GH-2602](https://github.com/gophercloud/gophercloud/pull/2602) [swift v1]: introduce a TempURLKey argument for objects.CreateTempURLOpts struct +* [GH-2623](https://github.com/gophercloud/gophercloud/pull/2623) Add the ability to remove ingress/egress policies from fwaas_v2 groups +* [GH-2625](https://github.com/gophercloud/gophercloud/pull/2625) neutron: Support trunk_details extension + +CI changes: + +* [GH-2608](https://github.com/gophercloud/gophercloud/pull/2608) Drop train and ussuri jobs +* [GH-2589](https://github.com/gophercloud/gophercloud/pull/2589) Bump EmilienM/devstack-action from 0.10 to 0.11 +* [GH-2604](https://github.com/gophercloud/gophercloud/pull/2604) Bump mheap/github-action-required-labels from 3 to 4 +* [GH-2620](https://github.com/gophercloud/gophercloud/pull/2620) Pin goimport dep to a version that works with go 1.14 +* [GH-2619](https://github.com/gophercloud/gophercloud/pull/2619) Fix version comparison for acceptance tests +* [GH-2627](https://github.com/gophercloud/gophercloud/pull/2627) Limits: Fix ToDo to create registered limit and use it +* [GH-2629](https://github.com/gophercloud/gophercloud/pull/2629) [manila]: Add share from snapshot restore functional test + + ## v1.3.0 (2023-03-28) * [GH-2464](https://github.com/gophercloud/gophercloud/pull/2464) keystone: add v3 limits create operation diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go index edba02bad..8ab592ca4 100644 --- a/vendor/github.com/gophercloud/gophercloud/errors.go +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -116,61 +116,109 @@ type ErrDefault400 struct { ErrUnexpectedResponseCode } +func (e ErrDefault400) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault401 is the default error type returned on a 
401 HTTP response code. type ErrDefault401 struct { ErrUnexpectedResponseCode } +func (e ErrDefault401) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault403 is the default error type returned on a 403 HTTP response code. type ErrDefault403 struct { ErrUnexpectedResponseCode } +func (e ErrDefault403) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault404 is the default error type returned on a 404 HTTP response code. type ErrDefault404 struct { ErrUnexpectedResponseCode } +func (e ErrDefault404) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault405 is the default error type returned on a 405 HTTP response code. type ErrDefault405 struct { ErrUnexpectedResponseCode } +func (e ErrDefault405) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault408 is the default error type returned on a 408 HTTP response code. type ErrDefault408 struct { ErrUnexpectedResponseCode } +func (e ErrDefault408) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault409 is the default error type returned on a 409 HTTP response code. type ErrDefault409 struct { ErrUnexpectedResponseCode } +func (e ErrDefault409) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault429 is the default error type returned on a 429 HTTP response code. type ErrDefault429 struct { ErrUnexpectedResponseCode } +func (e ErrDefault429) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault500 is the default error type returned on a 500 HTTP response code. type ErrDefault500 struct { ErrUnexpectedResponseCode } +func (e ErrDefault500) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault502 is the default error type returned on a 502 HTTP response code. type ErrDefault502 struct { ErrUnexpectedResponseCode } +func (e ErrDefault502) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault503 is the default error type returned on a 503 HTTP response code. type ErrDefault503 struct { ErrUnexpectedResponseCode } +func (e ErrDefault503) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + // ErrDefault504 is the default error type returned on a 504 HTTP response code. type ErrDefault504 struct { ErrUnexpectedResponseCode } +func (e ErrDefault504) Unwrap() error { + return e.ErrUnexpectedResponseCode +} + func (e ErrDefault400) Error() string { e.DefaultErrString = fmt.Sprintf( "Bad request with: [%s %s], error message: %s", diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index c603d6dbe..6cfb14fd7 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -14,7 +14,7 @@ import ( // DefaultUserAgent is the default User-Agent string set in the request header. const ( - DefaultUserAgent = "gophercloud/v1.3.0" + DefaultUserAgent = "gophercloud/v1.5.0" DefaultMaxBackoffRetries = 60 ) diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index 3e13ccc88..93087cd31 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
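Illustrative aside (not part of the patch): the Unwrap methods added above let callers use errors.As to reach the shared ErrUnexpectedResponseCode behind any ErrDefaultXXX. The Actual field is taken from the existing gophercloud error type and is not shown in this hunk, so treat it as an assumption.

package main

import (
	"errors"
	"fmt"

	"github.com/gophercloud/gophercloud"
)

func inspect(err error) {
	// With Unwrap in place, errors.As can extract the underlying
	// ErrUnexpectedResponseCode from any ErrDefaultXXX wrapper.
	var unexpected gophercloud.ErrUnexpectedResponseCode
	if errors.As(err, &unexpected) {
		// Actual is assumed from the pre-existing gophercloud error type.
		fmt.Println("unexpected status code:", unexpected.Actual)
	}
}

func main() {
	err := gophercloud.ErrDefault404{
		ErrUnexpectedResponseCode: gophercloud.ErrUnexpectedResponseCode{Actual: 404},
	}
	inspect(err)
}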
+// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 7904e7b71..f45929cb5 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -104,7 +107,8 @@ type AgentService struct { Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"` Partition string `json:",omitempty" bexpr:"-" hash:"ignore"` // Datacenter is only ever returned and is ignored if presented. - Datacenter string `json:",omitempty" bexpr:"-" hash:"ignore"` + Datacenter string `json:",omitempty" bexpr:"-" hash:"ignore"` + Locality *Locality `json:",omitempty" bexpr:"-" hash:"ignore"` } // AgentServiceChecksInfo returns information about a Service and its checks @@ -291,6 +295,7 @@ type AgentServiceRegistration struct { Connect *AgentServiceConnect `json:",omitempty"` Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"` Partition string `json:",omitempty" bexpr:"-" hash:"ignore"` + Locality *Locality `json:",omitempty" bexpr:"-" hash:"ignore"` } // ServiceRegisterOpts is used to pass extra options to the service register. @@ -498,6 +503,24 @@ func (a *Agent) Host() (map[string]interface{}, error) { return out, nil } +// Version is used to retrieve information about the running Consul version and build. +func (a *Agent) Version() (map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/version") + _, resp, err := a.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + var out map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + // Metrics is used to query the agent we are speaking to for // its current internal metric data func (a *Agent) Metrics() (*MetricsInfo, error) { @@ -1050,8 +1073,17 @@ func (a *Agent) ForceLeavePrune(node string) error { // ForceLeaveOpts is used to have the agent eject a failed node or remove it // completely from the list of members. +// +// DEPRECATED - Use ForceLeaveOptions instead. func (a *Agent) ForceLeaveOpts(node string, opts ForceLeaveOpts) error { + return a.ForceLeaveOptions(node, opts, nil) +} + +// ForceLeaveOptions is used to have the agent eject a failed node or remove it +// completely from the list of members. Allows usage of QueryOptions on-top of ForceLeaveOpts +func (a *Agent) ForceLeaveOptions(node string, opts ForceLeaveOpts, q *QueryOptions) error { r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + r.setQueryOptions(q) if opts.Prune { r.params.Set("prune", "1") } diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 772f8693b..1fe0c71b6 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
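Illustrative aside (not part of the patch): a minimal sketch of the two agent-level additions vendored above, Agent.Version and ForceLeaveOptions. The node name is made up; the method signatures are as defined in the hunk above.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// New in this bump: query the running agent's version and build info.
	version, err := client.Agent().Version()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(version)

	// ForceLeaveOptions supersedes ForceLeaveOpts and additionally accepts
	// QueryOptions (nil here).
	if err := client.Agent().ForceLeaveOptions("node-1", api.ForceLeaveOpts{Prune: true}, nil); err != nil {
		log.Fatal(err)
	}
}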
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -833,12 +836,21 @@ func (r *request) setQueryOptions(q *QueryOptions) { return } if q.Namespace != "" { + // For backwards-compatibility with existing tests, + // use the short-hand query param name "ns" + // rather than the alternative long-hand "namespace" r.params.Set("ns", q.Namespace) } if q.Partition != "" { + // For backwards-compatibility with existing tests, + // use the long-hand query param name "partition" + // rather than the alternative short-hand "ap" r.params.Set("partition", q.Partition) } if q.Datacenter != "" { + // For backwards-compatibility with existing tests, + // use the short-hand query param name "dc" + // rather than the alternative long-hand "datacenter" r.params.Set("dc", q.Datacenter) } if q.Peer != "" { @@ -946,12 +958,16 @@ func (r *request) setWriteOptions(q *WriteOptions) { if q == nil { return } + // For backwards-compatibility, continue to use the shorthand "ns" + // rather than "namespace" if q.Namespace != "" { r.params.Set("ns", q.Namespace) } if q.Partition != "" { r.params.Set("partition", q.Partition) } + // For backwards-compatibility, continue to use the shorthand "dc" + // rather than "datacenter" if q.Datacenter != "" { r.params.Set("dc", q.Datacenter) } diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go index 84a2bdbc6..0040ca6e7 100644 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -19,8 +22,9 @@ type Node struct { Meta map[string]string CreateIndex uint64 ModifyIndex uint64 - Partition string `json:",omitempty"` - PeerName string `json:",omitempty"` + Partition string `json:",omitempty"` + PeerName string `json:",omitempty"` + Locality *Locality `json:",omitempty"` } type ServiceAddress struct { @@ -45,6 +49,7 @@ type CatalogService struct { ServiceWeights Weights ServiceEnableTagOverride bool ServiceProxy *AgentServiceConnectProxyConfig + ServiceLocality *Locality `json:",omitempty"` CreateIndex uint64 Checks HealthChecks ModifyIndex uint64 @@ -73,7 +78,8 @@ type CatalogRegistration struct { Check *AgentCheck Checks HealthChecks SkipNodeUpdate bool - Partition string `json:",omitempty"` + Partition string `json:",omitempty"` + Locality *Locality `json:",omitempty"` } type CatalogDeregistration struct { diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go index 4e9682ee6..125619b55 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -23,6 +26,8 @@ const ( ServiceIntentions string = "service-intentions" MeshConfig string = "mesh" ExportedServices string = "exported-services" + SamenessGroup string = "sameness-group" + RateLimitIPConfig string = "control-plane-request-limit" ProxyConfigGlobal string = "global" MeshConfigMesh string = "mesh" @@ -30,11 +35,20 @@ const ( TCPRoute string = "tcp-route" InlineCertificate string = "inline-certificate" HTTPRoute string = "http-route" + JWTProvider string = "jwt-provider" ) const ( - BuiltinAWSLambdaExtension string = "builtin/aws/lambda" - BuiltinLuaExtension string = "builtin/lua" + BuiltinAWSLambdaExtension string = "builtin/aws/lambda" + BuiltinExtAuthzExtension string = "builtin/ext-authz" + BuiltinLuaExtension string = "builtin/lua" + BuiltinLocalRatelimitExtension string = "builtin/http/localratelimit" + BuiltinPropertyOverrideExtension string = "builtin/property-override" + BuiltinWasmExtension string = "builtin/wasm" + // BuiltinValidateExtension should not be exposed directly or accepted as a valid configured + // extension type, as it is only used indirectly via troubleshooting tools. It is included here + // for common reference alongside other builtin extensions. + BuiltinValidateExtension string = "builtin/proxy/validate" ) type ConfigEntry interface { @@ -102,6 +116,21 @@ type TransparentProxyConfig struct { DialedDirectly bool `json:",omitempty" alias:"dialed_directly"` } +type MutualTLSMode string + +const ( + // MutualTLSModeDefault represents no specific mode and should + // be used to indicate that a different layer of the configuration + // chain should take precedence. + MutualTLSModeDefault MutualTLSMode = "" + + // MutualTLSModeStrict requires mTLS for incoming traffic. + MutualTLSModeStrict MutualTLSMode = "strict" + + // MutualTLSModePermissive allows incoming non-mTLS traffic. + MutualTLSModePermissive MutualTLSMode = "permissive" +) + // ExposeConfig describes HTTP paths to expose through Envoy outside of Connect. // Users can expose individual paths and/or all HTTP/GRPC paths for checks. type ExposeConfig struct { @@ -115,9 +144,11 @@ type ExposeConfig struct { // EnvoyExtension has configuration for an extension that patches Envoy resources. type EnvoyExtension struct { - Name string - Required bool - Arguments map[string]interface{} `bexpr:"-"` + Name string + Required bool + Arguments map[string]interface{} `bexpr:"-"` + ConsulVersion string + EnvoyVersion string } type ExposePath struct { @@ -254,6 +285,15 @@ type PassiveHealthCheck struct { // when an outlier status is detected through consecutive 5xx. // This setting can be used to disable ejection or to ramp it up slowly. EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"` + + // The maximum % of an upstream cluster that can be ejected due to outlier detection. + // Defaults to 10% but will eject at least one host regardless of the value. + MaxEjectionPercent *uint32 `json:",omitempty" alias:"max_ejection_percent"` + + // The base time that a host is ejected for. The real time is equal to the base time + // multiplied by the number of times the host has been ejected and is capped by + // max_ejection_time (Default 300s). Defaults to 30000ms or 30s. 
+ BaseEjectionTime *time.Duration `json:",omitempty" alias:"base_ejection_time"` } // UpstreamLimits describes the limits that are associated with a specific @@ -283,6 +323,7 @@ type ServiceConfigEntry struct { Protocol string `json:",omitempty"` Mode ProxyMode `json:",omitempty"` TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` + MutualTLSMode MutualTLSMode `json:",omitempty" alias:"mutual_tls_mode"` MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` Expose ExposeConfig `json:",omitempty"` ExternalSNI string `json:",omitempty" alias:"external_sni"` @@ -307,17 +348,20 @@ func (s *ServiceConfigEntry) GetCreateIndex() uint64 { return s.CreateIndex func (s *ServiceConfigEntry) GetModifyIndex() uint64 { return s.ModifyIndex } type ProxyConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - Mode ProxyMode `json:",omitempty"` - TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` - Config map[string]interface{} `json:",omitempty"` - MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` - Expose ExposeConfig `json:",omitempty"` - AccessLogs *AccessLogsConfig `json:",omitempty" alias:"access_logs"` - EnvoyExtensions []EnvoyExtension `json:",omitempty" alias:"envoy_extensions"` + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + Mode ProxyMode `json:",omitempty"` + TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` + MutualTLSMode MutualTLSMode `json:",omitempty" alias:"mutual_tls_mode"` + Config map[string]interface{} `json:",omitempty"` + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` + Expose ExposeConfig `json:",omitempty"` + AccessLogs *AccessLogsConfig `json:",omitempty" alias:"access_logs"` + EnvoyExtensions []EnvoyExtension `json:",omitempty" alias:"envoy_extensions"` + FailoverPolicy *ServiceResolverFailoverPolicy `json:",omitempty" alias:"failover_policy"` + PrioritizeByLocality *ServiceResolverPrioritizeByLocality `json:",omitempty" alias:"prioritize_by_locality"` Meta map[string]string `json:",omitempty"` CreateIndex uint64 @@ -354,6 +398,8 @@ func makeConfigEntry(kind, name string) (ConfigEntry, error) { return &MeshConfigEntry{}, nil case ExportedServices: return &ExportedServicesConfigEntry{Name: name}, nil + case SamenessGroup: + return &SamenessGroupConfigEntry{Kind: kind, Name: name}, nil case APIGateway: return &APIGatewayConfigEntry{Kind: kind, Name: name}, nil case TCPRoute: @@ -362,6 +408,10 @@ func makeConfigEntry(kind, name string) (ConfigEntry, error) { return &InlineCertificateConfigEntry{Kind: kind, Name: name}, nil case HTTPRoute: return &HTTPRouteConfigEntry{Kind: kind, Name: name}, nil + case RateLimitIPConfig: + return &RateLimitIPConfigEntry{Kind: kind, Name: name}, nil + case JWTProvider: + return &JWTProviderConfigEntry{Kind: kind, Name: name}, nil default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go index acc05e13e..3696f7be5 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -169,6 +172,10 @@ type ServiceResolverConfigEntry struct { ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` + // PrioritizeByLocality controls whether the locality of services within the + // local partition will be used to prioritize connectivity. + PrioritizeByLocality *ServiceResolverPrioritizeByLocality `json:",omitempty" alias:"prioritize_by_locality"` + // LoadBalancer determines the load balancing policy and configuration for services // issuing requests to this upstream service. LoadBalancer *LoadBalancer `json:",omitempty" alias:"load_balancer"` @@ -234,15 +241,18 @@ type ServiceResolverRedirect struct { Partition string `json:",omitempty"` Datacenter string `json:",omitempty"` Peer string `json:",omitempty"` + SamenessGroup string `json:",omitempty" alias:"sameness_group"` } type ServiceResolverFailover struct { Service string `json:",omitempty"` ServiceSubset string `json:",omitempty" alias:"service_subset"` // Referencing other partitions is not supported. - Namespace string `json:",omitempty"` - Datacenters []string `json:",omitempty"` - Targets []ServiceResolverFailoverTarget `json:",omitempty"` + Namespace string `json:",omitempty"` + Datacenters []string `json:",omitempty"` + Targets []ServiceResolverFailoverTarget `json:",omitempty"` + Policy *ServiceResolverFailoverPolicy `json:",omitempty"` + SamenessGroup string `json:",omitempty" alias:"sameness_group"` } type ServiceResolverFailoverTarget struct { @@ -254,6 +264,20 @@ type ServiceResolverFailoverTarget struct { Peer string `json:",omitempty"` } +type ServiceResolverFailoverPolicy struct { + // Mode specifies the type of failover that will be performed. Valid values are + // "sequential", "" (equivalent to "sequential") and "order-by-locality". + Mode string `json:",omitempty"` + Regions []string `json:",omitempty"` +} + +type ServiceResolverPrioritizeByLocality struct { + // Mode specifies the type of prioritization that will be performed + // when selecting nodes in the local partition. + // Valid values are: "" (default "none"), "none", and "failover". + Mode string `json:",omitempty"` +} + // LoadBalancer determines the load balancing policy and configuration for services // issuing requests to this upstream service. type LoadBalancer struct { diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go index 52b0491f7..97920e40d 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import "encoding/json" @@ -51,6 +54,9 @@ type ServiceConsumer struct { // Peer is the name of the peer to export the service to. Peer string `json:",omitempty" alias:"peer_name"` + + // SamenessGroup is the name of the sameness group to export the service to. 
+ SamenessGroup string `json:",omitempty" alias:"sameness_group"` } func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go index 05e43832c..b59f1c062 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // IngressGatewayConfigEntry manages the configuration for an ingress service diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go b/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go index bbf12ccaa..47a1ead05 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // InlineCertificateConfigEntry -- TODO stub diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go b/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go index 0bff5e8e3..3f03b0875 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import "time" @@ -9,6 +12,7 @@ type ServiceIntentionsConfigEntry struct { Namespace string `json:",omitempty"` Sources []*SourceIntention + JWT *IntentionJWTRequirement `json:",omitempty"` Meta map[string]string `json:",omitempty"` @@ -17,15 +21,16 @@ type ServiceIntentionsConfigEntry struct { } type SourceIntention struct { - Name string - Peer string `json:",omitempty"` - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - Action IntentionAction `json:",omitempty"` - Permissions []*IntentionPermission `json:",omitempty"` - Precedence int - Type IntentionSourceType - Description string `json:",omitempty"` + Name string + Peer string `json:",omitempty"` + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + SamenessGroup string `json:",omitempty" alias:"sameness_group"` + Action IntentionAction `json:",omitempty"` + Permissions []*IntentionPermission `json:",omitempty"` + Precedence int + Type IntentionSourceType + Description string `json:",omitempty"` LegacyID string `json:",omitempty" alias:"legacy_id"` LegacyMeta map[string]string `json:",omitempty" alias:"legacy_meta"` @@ -44,6 +49,7 @@ func (e *ServiceIntentionsConfigEntry) GetModifyIndex() uint64 { return e.Mo type IntentionPermission struct { Action IntentionAction HTTP *IntentionHTTPPermission `json:",omitempty"` + JWT *IntentionJWTRequirement `json:",omitempty"` } type IntentionHTTPPermission struct { @@ -65,3 +71,30 @@ type IntentionHTTPHeaderPermission struct { Regex string `json:",omitempty"` Invert bool `json:",omitempty"` } + +type IntentionJWTRequirement struct { + // Providers is a list of providers to consider when verifying a JWT. + Providers []*IntentionJWTProvider `json:",omitempty"` +} + +type IntentionJWTProvider struct { + // Name is the name of the JWT provider. There MUST be a corresponding + // "jwt-provider" config entry with this name. 
+ Name string `json:",omitempty"` + + // VerifyClaims is a list of additional claims to verify in a JWT's payload. + VerifyClaims []*IntentionJWTClaimVerification `json:",omitempty" alias:"verify_claims"` +} + +type IntentionJWTClaimVerification struct { + // Path is the path to the claim in the token JSON. + Path []string `json:",omitempty"` + + // Value is the expected value at the given path: + // - If the type at the path is a list then we verify + // that this value is contained in the list. + // + // - If the type at the path is a string then we verify + // that this value matches. + Value string `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go b/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go new file mode 100644 index 000000000..e27974af3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go @@ -0,0 +1,237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "time" +) + +type JWTProviderConfigEntry struct { + // Kind is the kind of configuration entry and must be "jwt-provider". + Kind string `json:",omitempty"` + + // Name is the name of the provider being configured. + Name string `json:",omitempty"` + + // JSONWebKeySet defines a JSON Web Key Set, its location on disk, or the + // means with which to fetch a key set from a remote server. + JSONWebKeySet *JSONWebKeySet `json:",omitempty" alias:"json_web_key_set"` + + // Issuer is the entity that must have issued the JWT. + // This value must match the "iss" claim of the token. + Issuer string `json:",omitempty"` + + // Audiences is the set of audiences the JWT is allowed to access. + // If specified, all JWTs verified with this provider must address + // at least one of these to be considered valid. + Audiences []string `json:",omitempty"` + + // Locations where the JWT will be present in requests. + // Envoy will check all of these locations to extract a JWT. + // If no locations are specified Envoy will default to: + // 1. Authorization header with Bearer schema: + // "Authorization: Bearer " + // 2. access_token query parameter. + Locations []*JWTLocation `json:",omitempty"` + + // Forwarding defines rules for forwarding verified JWTs to the backend. + Forwarding *JWTForwardingConfig `json:",omitempty"` + + // ClockSkewSeconds specifies the maximum allowable time difference + // from clock skew when validating the "exp" (Expiration) and "nbf" + // (Not Before) claims. + // + // Default value is 30 seconds. + ClockSkewSeconds int `json:",omitempty" alias:"clock_skew_seconds"` + + // CacheConfig defines configuration for caching the validation + // result for previously seen JWTs. Caching results can speed up + // verification when individual tokens are expected to be handled + // multiple times. + CacheConfig *JWTCacheConfig `json:",omitempty" alias:"cache_config"` + + Meta map[string]string `json:",omitempty"` + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 `json:",omitempty"` + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 `json:",omitempty"` + + // Partition is the partition the JWTProviderConfigEntry applies to. + // Partitioning is a Consul Enterprise feature. 
+ Partition string `json:",omitempty"` + + // Namespace is the namespace the JWTProviderConfigEntry applies to. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` +} + +// JWTLocation is a location where the JWT could be present in requests. +// +// Only one of Header, QueryParam, or Cookie can be specified. +type JWTLocation struct { + // Header defines how to extract a JWT from an HTTP request header. + Header *JWTLocationHeader `json:",omitempty"` + + // QueryParam defines how to extract a JWT from an HTTP request + // query parameter. + QueryParam *JWTLocationQueryParam `json:",omitempty" alias:"query_param"` + + // Cookie defines how to extract a JWT from an HTTP request cookie. + Cookie *JWTLocationCookie `json:",omitempty"` +} + +// JWTLocationHeader defines how to extract a JWT from an HTTP +// request header. +type JWTLocationHeader struct { + // Name is the name of the header containing the token. + Name string `json:",omitempty"` + + // ValuePrefix is an optional prefix that precedes the token in the + // header value. + // For example, "Bearer " is a standard value prefix for a header named + // "Authorization", but the prefix is not part of the token itself: + // "Authorization: Bearer " + ValuePrefix string `json:",omitempty" alias:"value_prefix"` + + // Forward defines whether the header with the JWT should be + // forwarded after the token has been verified. If false, the + // header will not be forwarded to the backend. + // + // Default value is false. + Forward bool `json:",omitempty"` +} + +// JWTLocationQueryParam defines how to extract a JWT from an HTTP request query parameter. +type JWTLocationQueryParam struct { + // Name is the name of the query param containing the token. + Name string `json:",omitempty"` +} + +// JWTLocationCookie defines how to extract a JWT from an HTTP request cookie. +type JWTLocationCookie struct { + // Name is the name of the cookie containing the token. + Name string `json:",omitempty"` +} + +type JWTForwardingConfig struct { + // HeaderName is a header name to use when forwarding a verified + // JWT to the backend. The verified JWT could have been extracted + // from any location (query param, header, or cookie). + // + // The header value will be base64-URL-encoded, and will not be + // padded unless PadForwardPayloadHeader is true. + HeaderName string `json:",omitempty" alias:"header_name"` + + // PadForwardPayloadHeader determines whether padding should be added + // to the base64 encoded token forwarded with ForwardPayloadHeader. + // + // Default value is false. + PadForwardPayloadHeader bool `json:",omitempty" alias:"pad_forward_payload_header"` +} + +// JSONWebKeySet defines a key set, its location on disk, or the +// means with which to fetch a key set from a remote server. +// +// Exactly one of Local or Remote must be specified. +type JSONWebKeySet struct { + // Local specifies a local source for the key set. + Local *LocalJWKS `json:",omitempty"` + + // Remote specifies how to fetch a key set from a remote server. + Remote *RemoteJWKS `json:",omitempty"` +} + +// LocalJWKS specifies a location for a local JWKS. +// +// Only one of String and Filename can be specified. +type LocalJWKS struct { + // JWKS contains a base64 encoded JWKS. + JWKS string `json:",omitempty"` + + // Filename configures a location on disk where the JWKS can be + // found. If specified, the file must be present on the disk of ALL + // proxies with intentions referencing this provider. 
+ Filename string `json:",omitempty"` +} + +// RemoteJWKS specifies how to fetch a JWKS from a remote server. +type RemoteJWKS struct { + // URI is the URI of the server to query for the JWKS. + URI string `json:",omitempty"` + + // RequestTimeoutMs is the number of milliseconds to + // time out when making a request for the JWKS. + RequestTimeoutMs int `json:",omitempty" alias:"request_timeout_ms"` + + // CacheDuration is the duration after which cached keys + // should be expired. + // + // Default value is 5 minutes. + CacheDuration time.Duration `json:",omitempty" alias:"cache_duration"` + + // FetchAsynchronously indicates that the JWKS should be fetched + // when a client request arrives. Client requests will be paused + // until the JWKS is fetched. + // If false, the proxy listener will wait for the JWKS to be + // fetched before being activated. + // + // Default value is false. + FetchAsynchronously bool `json:",omitempty" alias:"fetch_asynchronously"` + + // RetryPolicy defines a retry policy for fetching JWKS. + // + // There is no retry by default. + RetryPolicy *JWKSRetryPolicy `json:",omitempty" alias:"retry_policy"` +} + +type JWKSRetryPolicy struct { + // NumRetries is the number of times to retry fetching the JWKS. + // The retry strategy uses jittered exponential backoff with + // a base interval of 1s and max of 10s. + // + // Default value is 0. + NumRetries int `json:",omitempty" alias:"num_retries"` + + // Backoff policy + // + // Defaults to Envoy's backoff policy + RetryPolicyBackOff *RetryPolicyBackOff `json:",omitempty" alias:"retry_policy_back_off"` +} + +type RetryPolicyBackOff struct { + // BaseInterval to be used for the next back off computation + // + // The default value from envoy is 1s + BaseInterval time.Duration `json:",omitempty" alias:"base_interval"` + + // MaxInternal to be used to specify the maximum interval between retries. + // Optional but should be greater or equal to BaseInterval. + // + // Defaults to 10 times BaseInterval + MaxInterval time.Duration `json:",omitempty" alias:"max_interval"` +} + +type JWTCacheConfig struct { + // Size specifies the maximum number of JWT verification + // results to cache. + // + // Defaults to 0, meaning that JWT caching is disabled. + Size int `json:",omitempty"` +} + +func (e *JWTProviderConfigEntry) GetKind() string { + return JWTProvider +} + +func (e *JWTProviderConfigEntry) GetName() string { return e.Name } +func (e *JWTProviderConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *JWTProviderConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *JWTProviderConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } +func (e *JWTProviderConfigEntry) GetPartition() string { return e.Partition } +func (e *JWTProviderConfigEntry) GetNamespace() string { return e.Namespace } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go index 98b882247..1a1ebb8b5 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -19,6 +22,10 @@ type MeshConfigEntry struct { // in transparent mode. TransparentProxy TransparentProxyMeshConfig `alias:"transparent_proxy"` + // AllowEnablingPermissiveMutualTLS must be true in order to allow setting + // MutualTLSMode=permissive in either service-defaults or proxy-defaults. 
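Illustrative aside (not part of the patch): a sketch of writing the new jwt-provider config entry defined above. The provider name and URLs are invented, and the ConfigEntries().Set call is the pre-existing config-entry write path (its signature is assumed from the rest of the package, not shown in this hunk).

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	entry := &api.JWTProviderConfigEntry{
		Kind:   api.JWTProvider,
		Name:   "example-provider", // example name
		Issuer: "https://issuer.example.com",
		JSONWebKeySet: &api.JSONWebKeySet{
			Remote: &api.RemoteJWKS{
				URI:           "https://issuer.example.com/.well-known/jwks.json",
				CacheDuration: 5 * time.Minute,
			},
		},
	}

	// Write the entry through the existing config-entry client.
	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}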
+ AllowEnablingPermissiveMutualTLS bool `json:",omitempty" alias:"allow_enabling_permissive_mutual_tls"` + TLS *MeshTLSConfig `json:",omitempty"` HTTP *MeshHTTPConfig `json:",omitempty"` diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go b/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go new file mode 100644 index 000000000..8df7d4c98 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +type ReadWriteRatesConfig struct { + ReadRate float64 + WriteRate float64 +} + +type RateLimitIPConfigEntry struct { + // Kind of the config entry. This will be set to structs.RateLimitIPConfig + Kind string + Name string + Mode string // {permissive, enforcing, disabled} + + Meta map[string]string `json:",omitempty"` + // overall limits + ReadRate float64 + WriteRate float64 + + //limits specific to a type of call + ACL *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryACL OperationCategory = "ACL" + Catalog *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryCatalog OperationCategory = "Catalog" + ConfigEntry *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryConfigEntry OperationCategory = "ConfigEntry" + ConnectCA *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryConnectCA OperationCategory = "ConnectCA" + Coordinate *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryCoordinate OperationCategory = "Coordinate" + DiscoveryChain *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryDiscoveryChain OperationCategory = "DiscoveryChain" + ServerDiscovery *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryServerDiscovery OperationCategory = "ServerDiscovery" + Health *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryHealth OperationCategory = "Health" + Intention *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryIntention OperationCategory = "Intention" + KV *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryKV OperationCategory = "KV" + Tenancy *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPartition OperationCategory = "Tenancy" + PreparedQuery *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPreparedQuery OperationCategory = "PreparedQuery" + Session *ReadWriteRatesConfig `json:",omitempty"` // OperationCategorySession OperationCategory = "Session" + Txn *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryTxn OperationCategory = "Txn" + AutoConfig *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryAutoConfig OperationCategory = "AutoConfig" + FederationState *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryFederationState OperationCategory = "FederationState" + Internal *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryInternal OperationCategory = "Internal" + PeerStream *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPeerStream OperationCategory = "PeerStream" + Peering *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryPeering OperationCategory = "Peering" + DataPlane *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryDataPlane OperationCategory = "DataPlane" + DNS *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryDNS OperationCategory = "DNS" + Subscribe *ReadWriteRatesConfig `json:",omitempty"` // OperationCategorySubscribe OperationCategory = "Subscribe" + Resource 
*ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryResource OperationCategory = "Resource" + + // Partition is the partition the config entry is associated with. + // Partitioning is a Consul Enterprise feature. + Partition string `json:",omitempty"` + + // Namespace is the namespace the config entry is associated with. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 +} + +func (r *RateLimitIPConfigEntry) GetKind() string { + return RateLimitIPConfig +} +func (r *RateLimitIPConfigEntry) GetName() string { + if r == nil { + return "" + } + return r.Name +} +func (r *RateLimitIPConfigEntry) GetPartition() string { + return r.Partition +} +func (r *RateLimitIPConfigEntry) GetNamespace() string { + return r.Namespace +} +func (r *RateLimitIPConfigEntry) GetMeta() map[string]string { + if r == nil { + return nil + } + return r.Meta +} +func (r *RateLimitIPConfigEntry) GetCreateIndex() uint64 { + return r.CreateIndex +} +func (r *RateLimitIPConfigEntry) GetModifyIndex() uint64 { + return r.ModifyIndex +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_routes.go b/vendor/github.com/hashicorp/consul/api/config_entry_routes.go index 2edf9b23a..cfea39453 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_routes.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_routes.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // TCPRouteConfigEntry -- TODO stub diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go b/vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go new file mode 100644 index 000000000..1217efe7d --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_sameness_group.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +type SamenessGroupConfigEntry struct { + Kind string + Name string + Partition string `json:",omitempty"` + DefaultForFailover bool `json:",omitempty" alias:"default_for_failover"` + IncludeLocal bool `json:",omitempty" alias:"include_local"` + Members []SamenessGroupMember + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 +} + +type SamenessGroupMember struct { + Partition string `json:",omitempty"` + Peer string `json:",omitempty"` +} + +func (s *SamenessGroupConfigEntry) GetKind() string { return s.Kind } +func (s *SamenessGroupConfigEntry) GetName() string { return s.Name } +func (s *SamenessGroupConfigEntry) GetPartition() string { return s.Partition } +func (s *SamenessGroupConfigEntry) GetNamespace() string { return "" } +func (s *SamenessGroupConfigEntry) GetCreateIndex() uint64 { return s.CreateIndex } +func (s *SamenessGroupConfigEntry) GetModifyIndex() uint64 { return s.ModifyIndex } +func (s *SamenessGroupConfigEntry) GetMeta() map[string]string { return s.Meta } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_status.go b/vendor/github.com/hashicorp/consul/api/config_entry_status.go index 83523643b..2d16ea0fc 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_status.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_status.go @@ -1,7 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( + "fmt" "time" + + "golang.org/x/exp/slices" ) // ResourceReference is a reference to a ConfigEntry @@ -43,7 +49,7 @@ type Condition struct { // Type is a value from a bounded set of types that an object might have Type string // Status is a value from a bounded set of statuses that an object might have - Status string + Status ConditionStatus // Reason is a value from a bounded set of reasons for a given status Reason string // Message is a message that gives more detailed information about @@ -55,3 +61,279 @@ type Condition struct { // LastTransitionTime is the time at which this Condition was created LastTransitionTime *time.Time } + +type ( + ConditionStatus string +) + +const ( + ConditionStatusTrue ConditionStatus = "True" + ConditionStatusFalse ConditionStatus = "False" + ConditionStatusUnknown ConditionStatus = "Unknown" +) + +// GatewayConditionType is a type of condition associated with a +// Gateway. This type should be used with the GatewayStatus.Conditions +// field. +type GatewayConditionType string + +// GatewayConditionReason defines the set of reasons that explain why a +// particular Gateway condition type has been raised. +type GatewayConditionReason string + +// the following are directly from the k8s spec +const ( + // This condition is true when the controller managing the Gateway is + // syntactically and semantically valid enough to produce some configuration + // in the underlying data plane. This does not indicate whether or not the + // configuration has been propagated to the data plane. + // + // Possible reasons for this condition to be True are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * InvalidCertificates + // + GatewayConditionAccepted GatewayConditionType = "Accepted" + + // This reason is used with the "Accepted" condition when the condition is + // True. 
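Illustrative aside (not part of the patch): the sameness-group entry added above follows the same config-entry pattern. Names are invented; the Set call is the pre-existing write path, as in the jwt-provider sketch earlier.

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	entry := &api.SamenessGroupConfigEntry{
		Kind:               api.SamenessGroup,
		Name:               "products", // example name
		DefaultForFailover: true,
		IncludeLocal:       true,
		Members: []api.SamenessGroupMember{
			{Peer: "dc2-default"}, // example peer name
		},
	}

	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}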
+ GatewayReasonAccepted GatewayConditionReason = "Accepted" + + // This reason is used with the "Accepted" condition when the gateway has multiple invalid + // certificates and cannot bind to any routes + GatewayReasonInvalidCertificates GatewayConditionReason = "InvalidCertificates" + + // This condition indicates that the gateway was unable to resolve + // conflicting specification requirements for this Listener. If a + // Listener is conflicted, its network port should not be configured + // on any network elements. + // + // Possible reasons for this condition to be true are: + // + // * "RouteConflict" + // + // Possible reasons for this condition to be False are: + // + // * "NoConflict" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + GatewayConditionConflicted GatewayConditionType = "Conflicted" + // This reason is used with the "Conflicted" condition when the condition + // is False. + GatewayReasonNoConflict GatewayConditionReason = "NoConflict" + // This reason is used with the "Conflicted" condition when the route is + // in a conflicted state, such as when a TCPListener attempts to bind to two routes + GatewayReasonRouteConflict GatewayConditionReason = "RouteConflict" + + // This condition indicates whether the controller was able to + // resolve all the object references for the Gateway. When setting this + // condition to False, a ResourceReference to the misconfigured Listener should + // be provided. + // + // Possible reasons for this condition to be true are: + // + // * "ResolvedRefs" + // + // Possible reasons for this condition to be False are: + // + // * "InvalidCertificateRef" + // * "InvalidRouteKinds" + // * "RefNotPermitted" + // + GatewayConditionResolvedRefs GatewayConditionType = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when the condition + // is true. + GatewayReasonResolvedRefs GatewayConditionReason = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when a + // Listener has a TLS configuration with at least one TLS CertificateRef + // that is invalid or does not exist. + // A CertificateRef is considered invalid when it refers to a nonexistent + // or unsupported resource or kind, or when the data within that resource + // is malformed. + // This reason must be used only when the reference is allowed, either by + // referencing an object in the same namespace as the Gateway, or when + // a cross-namespace reference has been explicitly allowed by a ReferenceGrant. + // If the reference is not allowed, the reason RefNotPermitted must be used + // instead. 
+ GatewayListenerReasonInvalidCertificateRef GatewayConditionReason = "InvalidCertificateRef" +) + +var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[ConditionStatus][]GatewayConditionReason{ + GatewayConditionAccepted: { + ConditionStatusTrue: { + GatewayReasonAccepted, + }, + ConditionStatusFalse: { + GatewayReasonInvalidCertificates, + }, + ConditionStatusUnknown: {}, + }, + GatewayConditionConflicted: { + ConditionStatusTrue: { + GatewayReasonRouteConflict, + }, + ConditionStatusFalse: { + GatewayReasonNoConflict, + }, + ConditionStatusUnknown: {}, + }, + GatewayConditionResolvedRefs: { + ConditionStatusTrue: { + GatewayReasonResolvedRefs, + }, + ConditionStatusFalse: { + GatewayListenerReasonInvalidCertificateRef, + }, + ConditionStatusUnknown: {}, + }, +} + +func ValidateGatewayConditionReason(name GatewayConditionType, status ConditionStatus, reason GatewayConditionReason) error { + if err := checkConditionStatus(status); err != nil { + return err + } + + reasons, ok := validGatewayConditionReasonsMapping[name] + if !ok { + return fmt.Errorf("unrecognized GatewayConditionType %q", name) + } + + reasonsForStatus, ok := reasons[status] + if !ok { + return fmt.Errorf("unrecognized ConditionStatus %q", status) + } + + if !slices.Contains(reasonsForStatus, reason) { + return fmt.Errorf("gateway condition reason %q not allowed for gateway condition type %q with status %q", reason, name, status) + } + return nil +} + +// RouteConditionType is a type of condition for a route. +type RouteConditionType string + +// RouteConditionReason is a reason for a route condition. +type RouteConditionReason string + +// The following statuses are taken from the K8's Spec +// With the exception of: "RouteReasonInvalidDiscoveryChain" and "NoUpstreamServicesTargeted" +const ( + // This condition indicates whether the route has been accepted or rejected + // by a Gateway, and why. + // + // Possible reasons for this condition to be true are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * "InvalidDiscoveryChain" + // * "NoUpstreamServicesTargeted" + // + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + RouteConditionAccepted RouteConditionType = "Accepted" + + // This reason is used with the "Accepted" condition when the Route has been + // accepted by the Gateway. 
+ RouteReasonAccepted RouteConditionReason = "Accepted" + + // This reason is used with the "Accepted" condition when the route has an + // invalid discovery chain, this includes conditions like the protocol being invalid + // or the discovery chain failing to compile + RouteReasonInvalidDiscoveryChain RouteConditionReason = "InvalidDiscoveryChain" + + // This reason is used with the "Accepted" condition when the route + RouteReasonNoUpstreamServicesTargeted RouteConditionReason = "NoUpstreamServicesTargeted" +) + +// the following statuses are custom to Consul +const ( + // This condition indicates whether the route was able to successfully bind the + // Listener on the gateway + // Possible reasons for this condition to be true are: + // + // * "Bound" + // + // Possible reasons for this condition to be false are: + // + // * "FailedToBind" + // * "GatewayNotFound" + // + RouteConditionBound RouteConditionType = "Bound" + + // This reason is used with the "Bound" condition when the condition + // is true + RouteReasonBound RouteConditionReason = "Bound" + + // This reason is used with the "Bound" condition when the route failed + // to bind to the gateway + RouteReasonFailedToBind RouteConditionReason = "FailedToBind" + + // This reason is used with the "Bound" condition when the route fails + // to find the gateway + RouteReasonGatewayNotFound RouteConditionReason = "GatewayNotFound" +) + +var validRouteConditionReasonsMapping = map[RouteConditionType]map[ConditionStatus][]RouteConditionReason{ + RouteConditionAccepted: { + ConditionStatusTrue: { + RouteReasonAccepted, + }, + ConditionStatusFalse: { + RouteReasonInvalidDiscoveryChain, + RouteReasonNoUpstreamServicesTargeted, + }, + ConditionStatusUnknown: {}, + }, + RouteConditionBound: { + ConditionStatusTrue: { + RouteReasonBound, + }, + ConditionStatusFalse: { + RouteReasonGatewayNotFound, + RouteReasonFailedToBind, + }, + ConditionStatusUnknown: {}, + }, +} + +func ValidateRouteConditionReason(name RouteConditionType, status ConditionStatus, reason RouteConditionReason) error { + if err := checkConditionStatus(status); err != nil { + return err + } + + reasons, ok := validRouteConditionReasonsMapping[name] + if !ok { + return fmt.Errorf("unrecognized RouteConditionType %s", name) + } + + reasonsForStatus, ok := reasons[status] + if !ok { + return fmt.Errorf("unrecognized ConditionStatus %s", name) + } + + if !slices.Contains(reasonsForStatus, reason) { + return fmt.Errorf("route condition reason %s not allowed for route condition type %s with status %s", reason, name, status) + } + + return nil +} + +func checkConditionStatus(status ConditionStatus) error { + switch status { + case ConditionStatusTrue, ConditionStatusFalse, ConditionStatusUnknown: + return nil + default: + return fmt.Errorf("unrecognized condition status: %q", status) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/connect.go b/vendor/github.com/hashicorp/consul/api/connect.go index a40d1e232..77be00034 100644 --- a/vendor/github.com/hashicorp/consul/api/connect.go +++ b/vendor/github.com/hashicorp/consul/api/connect.go @@ -1,5 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api +// TelemetryCollectorName is the service name for the Consul Telemetry Collector +const TelemetryCollectorName string = "consul-telemetry-collector" + // Connect can be used to work with endpoints related to Connect, the // feature for securely connecting services within Consul. 
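Illustrative aside (not part of the patch): the two validators vendored above check that a condition reason is allowed for the given condition type and status. Both calls below use only constants and functions defined in this hunk.

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Valid combination: Accepted / True / Accepted.
	err := api.ValidateGatewayConditionReason(
		api.GatewayConditionAccepted,
		api.ConditionStatusTrue,
		api.GatewayReasonAccepted,
	)
	fmt.Println(err) // <nil>

	// A reason that is not allowed for the given status is rejected.
	err = api.ValidateRouteConditionReason(
		api.RouteConditionBound,
		api.ConditionStatusFalse,
		api.RouteReasonAccepted,
	)
	fmt.Println(err != nil) // true
}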
type Connect struct { diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go index 69c652dac..8a5c9f870 100644 --- a/vendor/github.com/hashicorp/consul/api/connect_ca.go +++ b/vendor/github.com/hashicorp/consul/api/connect_ca.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go index 0c2500fd0..e91c03e8b 100644 --- a/vendor/github.com/hashicorp/consul/api/connect_intention.go +++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -40,6 +43,10 @@ type Intention struct { // same level of tenancy (partition is local to cluster, peer is remote). SourcePeer string `json:",omitempty"` + // SourceSamenessGroup cannot be wildcards "*" and + // is not compatible with legacy intentions. + SourceSamenessGroup string `json:",omitempty"` + // SourceType is the type of the value for the source. SourceType IntentionSourceType diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go index 7ef6ce274..b0269adae 100644 --- a/vendor/github.com/hashicorp/consul/api/coordinate.go +++ b/vendor/github.com/hashicorp/consul/api/coordinate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go index b7e80b88d..e6b5dc52d 100644 --- a/vendor/github.com/hashicorp/consul/api/debug.go +++ b/vendor/github.com/hashicorp/consul/api/debug.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go index 4217603cf..4b6260cf3 100644 --- a/vendor/github.com/hashicorp/consul/api/discovery_chain.go +++ b/vendor/github.com/hashicorp/consul/api/discovery_chain.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -221,6 +224,7 @@ func (r *DiscoveryResolver) UnmarshalJSON(data []byte) error { // compiled form of ServiceResolverFailover type DiscoveryFailover struct { Targets []string + Policy ServiceResolverFailoverPolicy `json:",omitempty"` } // DiscoveryTarget represents all of the inputs necessary to use a resolver diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go index ceded6598..efba89d3b 100644 --- a/vendor/github.com/hashicorp/consul/api/event.go +++ b/vendor/github.com/hashicorp/consul/api/event.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index a89b4b727..932317fdb 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/internal.go b/vendor/github.com/hashicorp/consul/api/internal.go new file mode 100644 index 000000000..dee161a65 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/internal.go @@ -0,0 +1,64 @@ +package api + +import "context" + +// Internal can be used to query endpoints that are intended for +// Hashicorp internal-use only. +type Internal struct { + c *Client +} + +// Internal returns a handle to endpoints that are for internal +// Hashicorp usage only. There is not guarantee that these will +// be backwards-compatible or supported, so usage of these is +// not encouraged. +func (c *Client) Internal() *Internal { + return &Internal{c} +} + +type AssignServiceManualVIPsRequest struct { + Service string + ManualVIPs []string +} + +type AssignServiceManualVIPsResponse struct { + ServiceFound bool `json:"Found"` + UnassignedFrom []PeeredServiceName +} + +type PeeredServiceName struct { + ServiceName CompoundServiceName + Peer string +} + +func (i *Internal) AssignServiceVirtualIP( + ctx context.Context, + service string, + manualVIPs []string, + wo *WriteOptions, +) (*AssignServiceManualVIPsResponse, *QueryMeta, error) { + req := i.c.newRequest("PUT", "/v1/internal/service-virtual-ip") + req.setWriteOptions(wo) + req.ctx = ctx + req.obj = AssignServiceManualVIPsRequest{ + Service: service, + ManualVIPs: manualVIPs, + } + rtt, resp, err := i.c.doRequest(req) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{RequestTime: rtt} + parseQueryMeta(resp, qm) + + var out AssignServiceManualVIPsResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go index 85a9d7750..b9d330a6f 100644 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go index 221a7add3..e9529f7bd 100644 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ b/vendor/github.com/hashicorp/consul/api/lock.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/namespace.go b/vendor/github.com/hashicorp/consul/api/namespace.go index 65cc6f3f3..98afd2299 100644 --- a/vendor/github.com/hashicorp/consul/api/namespace.go +++ b/vendor/github.com/hashicorp/consul/api/namespace.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go index 079e22486..667dcd872 100644 --- a/vendor/github.com/hashicorp/consul/api/operator.go +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // Operator can be used to perform low-level operator tasks for Consul. 
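For orientation, the internal.go hunk above introduces an Internal handle with an AssignServiceVirtualIP method. A minimal usage sketch follows, assuming a reachable local Consul agent; the service name and addresses are illustrative values, and the hunk itself notes the endpoint is internal-use only with no compatibility guarantee.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Client against the local agent using the default configuration.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Assign manual virtual IPs to a service via the internal endpoint
	// added in this bump. "web" and the addresses are example values.
	resp, _, err := client.Internal().AssignServiceVirtualIP(
		context.Background(), "web", []string{"240.0.0.1", "240.0.0.2"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("service found:", resp.ServiceFound, "unassigned from:", resp.UnassignedFrom)
}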
diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go index f9fa1339e..9228d89b4 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_area.go +++ b/vendor/github.com/hashicorp/consul/api/operator_area.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // The /v1/operator/area endpoints are available only in Consul Enterprise and diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go index 6ab576970..7628bf6f2 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go index 6db31a252..aefec9e27 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_keyring.go +++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // keyringRequest is used for performing Keyring operations diff --git a/vendor/github.com/hashicorp/consul/api/operator_license.go b/vendor/github.com/hashicorp/consul/api/operator_license.go index 74eed3baa..1e3496da0 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_license.go +++ b/vendor/github.com/hashicorp/consul/api/operator_license.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go index 1da20e899..393d6fb3c 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // RaftServer has information about a server in the Raft configuration. diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go index 92b05d3c0..6115a7ab4 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_segment.go +++ b/vendor/github.com/hashicorp/consul/api/operator_segment.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // SegmentList returns all the available LAN segments. diff --git a/vendor/github.com/hashicorp/consul/api/operator_usage.go b/vendor/github.com/hashicorp/consul/api/operator_usage.go index d07e774d8..e47d4b53e 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_usage.go +++ b/vendor/github.com/hashicorp/consul/api/operator_usage.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api type Usage struct { diff --git a/vendor/github.com/hashicorp/consul/api/partition.go b/vendor/github.com/hashicorp/consul/api/partition.go index 88edfb7b0..8467c3118 100644 --- a/vendor/github.com/hashicorp/consul/api/partition.go +++ b/vendor/github.com/hashicorp/consul/api/partition.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/peering.go b/vendor/github.com/hashicorp/consul/api/peering.go index 34602c878..dd7780f63 100644 --- a/vendor/github.com/hashicorp/consul/api/peering.go +++ b/vendor/github.com/hashicorp/consul/api/peering.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -44,6 +47,16 @@ type PeeringRemoteInfo struct { Partition string // Datacenter is the remote peer's datacenter. Datacenter string + Locality *Locality `json:",omitempty"` +} + +// Locality identifies where a given entity is running. +type Locality struct { + // Region is region the zone belongs to. + Region string + + // Zone is the zone the entity is running in. + Zone string } type Peering struct { diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go index f47a58373..bb40e6a7f 100644 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // QueryFailoverOptions sets options about how we fail over if there are no @@ -26,6 +29,14 @@ type QueryFailoverTarget struct { // Datacenter specifies a datacenter to try during failover. Datacenter string + + // Partition specifies a partition to try during failover + // Note: Partition are available only in Consul Enterprise + Partition string + + // Namespace specifies a namespace to try during failover + // Note: Namespaces are available only in Consul Enterprise + Namespace string } // QueryDNSOptions controls settings when query results are served over DNS. @@ -40,9 +51,17 @@ type ServiceQuery struct { // Service is the service to query. Service string + // SamenessGroup specifies a sameness group to query. The first member of the Sameness Group will + // be targeted first on PQ execution and subsequent members will be targeted during failover scenarios. + // This field is mutually exclusive with Failover. + SamenessGroup string `json:",omitempty"` + // Namespace of the service to query Namespace string `json:",omitempty"` + // Partition of the service to query + Partition string `json:",omitempty"` + // Near allows baking in the name of a node to automatically distance- // sort from. The magic "_agent" value is supported, which sorts near // the agent which initiated the request by default. @@ -50,7 +69,7 @@ type ServiceQuery struct { // Failover controls what we do if there are no healthy nodes in the // local datacenter. - Failover QueryFailoverOptions + Failover QueryFailoverOptions `json:",omitempty"` // IgnoreCheckIDs is an optional list of health check IDs to ignore when // considering which nodes are healthy. It is useful as an emergency measure diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go index 745a208c9..639513d29 100644 --- a/vendor/github.com/hashicorp/consul/api/raw.go +++ b/vendor/github.com/hashicorp/consul/api/raw.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api // Raw can be used to do raw queries against custom endpoints diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go index 066ce33a9..9d98ff5c2 100644 --- a/vendor/github.com/hashicorp/consul/api/semaphore.go +++ b/vendor/github.com/hashicorp/consul/api/semaphore.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go index 3f61acfbb..69fd77d27 100644 --- a/vendor/github.com/hashicorp/consul/api/session.go +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go index b526b79c3..bcc80e5b3 100644 --- a/vendor/github.com/hashicorp/consul/api/snapshot.go +++ b/vendor/github.com/hashicorp/consul/api/snapshot.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go index 86f943bc7..8c52eb222 100644 --- a/vendor/github.com/hashicorp/consul/api/status.go +++ b/vendor/github.com/hashicorp/consul/api/status.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // Status can be used to query the Status endpoints diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go index 4aa06d9f5..59adafdac 100644 --- a/vendor/github.com/hashicorp/consul/api/txn.go +++ b/vendor/github.com/hashicorp/consul/api/txn.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/cronexpr/LICENSE b/vendor/github.com/hashicorp/cronexpr/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/hashicorp/cronexpr/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE index abaf1e45f..9938fb50e 100644 --- a/vendor/github.com/hashicorp/go-hclog/LICENSE +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -1,6 +1,4 @@ -MIT License - -Copyright (c) 2017 HashiCorp +Copyright (c) 2017 HashiCorp, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go index 99cc176a4..d00816b38 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MIT + //go:build !windows // +build !windows @@ -7,23 +10,35 @@ import ( "github.com/mattn/go-isatty" ) +// hasFD is used to check if the writer has an Fd value to check +// if it's a terminal. +type hasFD interface { + Fd() uintptr +} + // setColorization will mutate the values of this logger // to appropriately configure colorization options. It provides // a wrapper to the output stream on Windows systems. func (l *intLogger) setColorization(opts *LoggerOptions) { - switch opts.Color { - case ColorOff: - fallthrough - case ForceColor: + if opts.Color != AutoColor { return - case AutoColor: - fi := l.checkWriterIsFile() - isUnixTerm := isatty.IsTerminal(fi.Fd()) - isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) - isTerm := isUnixTerm || isCygwinTerm - if !isTerm { + } + + if sc, ok := l.writer.w.(SupportsColor); ok { + if !sc.SupportsColor() { l.headerColor = ColorOff l.writer.color = ColorOff } + return + } + + fi, ok := l.writer.w.(hasFD) + if !ok { + return + } + + if !isatty.IsTerminal(fi.Fd()) { + l.headerColor = ColorOff + l.writer.color = ColorOff } } diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go index 26f8cef8d..2c3fb9ea6 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + //go:build windows // +build windows @@ -7,32 +10,32 @@ import ( "os" colorable "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" ) // setColorization will mutate the values of this logger // to appropriately configure colorization options. It provides // a wrapper to the output stream on Windows systems. func (l *intLogger) setColorization(opts *LoggerOptions) { - switch opts.Color { - case ColorOff: + if opts.Color == ColorOff { + return + } + + fi, ok := l.writer.w.(*os.File) + if !ok { + l.writer.color = ColorOff + l.headerColor = ColorOff return - case ForceColor: - fi := l.checkWriterIsFile() - l.writer.w = colorable.NewColorable(fi) - case AutoColor: - fi := l.checkWriterIsFile() - isUnixTerm := isatty.IsTerminal(os.Stdout.Fd()) - isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd()) - isTerm := isUnixTerm || isCygwinTerm - if !isTerm { - l.writer.color = ColorOff - l.headerColor = ColorOff - return - } - - if l.headerColor == ColorOff { - l.writer.w = colorable.NewColorable(fi) - } + } + + cfi := colorable.NewColorable(fi) + + // NewColorable detects if color is possible and if it's not, then it + // returns the original value. So we can test if we got the original + // value back to know if color is possible. + if cfi == fi { + l.writer.color = ColorOff + l.headerColor = ColorOff + } else { + l.writer.w = cfi } } diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go index 7815f5019..eb5aba556 100644 --- a/vendor/github.com/hashicorp/go-hclog/context.go +++ b/vendor/github.com/hashicorp/go-hclog/context.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/exclude.go b/vendor/github.com/hashicorp/go-hclog/exclude.go index cfd4307a8..4b73ba553 100644 --- a/vendor/github.com/hashicorp/go-hclog/exclude.go +++ b/vendor/github.com/hashicorp/go-hclog/exclude.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index 48ff1f3a4..a7403f593 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go index ff42f1bfc..e9b1c1885 100644 --- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 89d26c9b0..b45064acf 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( @@ -8,7 +11,6 @@ import ( "fmt" "io" "log" - "os" "reflect" "runtime" "sort" @@ -86,6 +88,8 @@ type intLogger struct { // create subloggers with their own level setting independentLevels bool + + subloggerHook func(sub Logger) Logger } // New returns a configured logger. @@ -152,6 +156,7 @@ func newLogger(opts *LoggerOptions) *intLogger { independentLevels: opts.IndependentLevels, headerColor: headerColor, fieldColor: fieldColor, + subloggerHook: opts.SubloggerHook, } if opts.IncludeLocation { l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset @@ -167,6 +172,10 @@ func newLogger(opts *LoggerOptions) *intLogger { l.timeFormat = opts.TimeFormat } + if l.subloggerHook == nil { + l.subloggerHook = identityHook + } + l.setColorization(opts) atomic.StoreInt32(l.level, int32(level)) @@ -174,6 +183,10 @@ func newLogger(opts *LoggerOptions) *intLogger { return l } +func identityHook(logger Logger) Logger { + return logger +} + // offsetIntLogger is the stack frame offset in the call stack for the caller to // one of the Warn, Info, Log, etc methods. const offsetIntLogger = 3 @@ -775,7 +788,7 @@ func (l *intLogger) With(args ...interface{}) Logger { sl.implied = append(sl.implied, MissingKey, extra) } - return sl + return l.subloggerHook(sl) } // Create a new sub-Logger that a name decending from the current name. @@ -789,7 +802,7 @@ func (l *intLogger) Named(name string) Logger { sl.name = name } - return sl + return l.subloggerHook(sl) } // Create a new sub-Logger with an explicit name. This ignores the current @@ -800,7 +813,7 @@ func (l *intLogger) ResetNamed(name string) Logger { sl.name = name - return sl + return l.subloggerHook(sl) } func (l *intLogger) ResetOutput(opts *LoggerOptions) error { @@ -876,16 +889,6 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { } } -// checks if the underlying io.Writer is a file, and -// panics if not. For use by colorization. -func (l *intLogger) checkWriterIsFile() *os.File { - fi, ok := l.writer.w.(*os.File) - if !ok { - panic("Cannot enable coloring of non-file Writers") - } - return fi -} - // Accept implements the SinkAdapter interface func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { i.log(name, level, msg, args...) 
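For orientation, the intlogger.go hunk above routes every sublogger created via With, Named, and ResetNamed through a new hook, and the logger.go hunk that follows adds the matching SubloggerHook option. A minimal usage sketch, assuming only the option and interface names shown in these hunks; the counter is illustrative. Because With and Named themselves invoke the hook, a hook that calls them would re-enter itself, so this one only observes the sublogger and returns it unchanged.

package main

import (
	"os"
	"sync/atomic"

	"github.com/hashicorp/go-hclog"
)

func main() {
	var subloggers int64

	logger := hclog.New(&hclog.LoggerOptions{
		Name:   "app",
		Output: os.Stderr,
		// Called for every sublogger created via Named, With, or
		// ResetNamed; here it only counts creations and returns the
		// sublogger unchanged.
		SubloggerHook: func(sub hclog.Logger) hclog.Logger {
			atomic.AddInt64(&subloggers, 1)
			return sub
		},
	})

	db := logger.Named("db")
	db.Info("connected", "subloggers_created", atomic.LoadInt64(&subloggers))
}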
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 3cdb2837d..947ac0c9a 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( @@ -89,6 +92,13 @@ const ( ForceColor ) +// SupportsColor is an optional interface that can be implemented by the output +// value. If implemented and SupportsColor() returns true, then AutoColor will +// enable colorization. +type SupportsColor interface { + SupportsColor() bool +} + // LevelFromString returns a Level type for the named log level, or "NoLevel" if // the level string is invalid. This facilitates setting the log level via // config or environment variable by name in a predictable way. @@ -292,6 +302,13 @@ type LoggerOptions struct { // logger will not affect any subloggers, and SetLevel on any subloggers // will not affect the parent or sibling loggers. IndependentLevels bool + + // SubloggerHook registers a function that is called when a sublogger via + // Named, With, or ResetNamed is created. If defined, the function is passed + // the newly created Logger and the returned Logger is returned from the + // original function. This option allows customization via interception and + // wrapping of Logger instances. + SubloggerHook func(sub Logger) Logger } // InterceptLogger describes the interface for using a logger diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index 55e89dd31..d43da809e 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index 641f20ccb..03739b61f 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go index 421a1f06c..4ee219bf0 100644 --- a/vendor/github.com/hashicorp/go-hclog/writer.go +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md new file mode 100644 index 000000000..33686e4da --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md @@ -0,0 +1,9 @@ +## 0.7.4 (Jun 6, 2023) + +BUG FIXES + +- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 [GH-194] + +## 0.7.3 (May 15, 2023) + +Initial release diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS new file mode 100644 index 000000000..f8389c995 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS @@ -0,0 +1 @@ +* @hashicorp/release-engineering \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE index e87a115e4..f4f97ee58 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2015 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index f40d2411c..cad96bd97 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package retryablehttp provides a familiar HTTP client interface with // automatic retries and exponential backoff. It is a thin wrapper over the // standard net/http client library and exposes nearly the same public API. @@ -257,10 +260,17 @@ func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, erro if err != nil { return nil, 0, err } - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil + if len(buf) == 0 { + bodyReader = func() (io.Reader, error) { + return http.NoBody, nil + } + contentLength = 0 + } else { + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) } - contentLength = int64(len(buf)) // No body provided, nothing to do case nil: diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go index 8f3ee3584..8c407adb3 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package retryablehttp import ( diff --git a/vendor/github.com/hashicorp/nomad/api/allocations.go b/vendor/github.com/hashicorp/nomad/api/allocations.go index 0159a9e12..121a75934 100644 --- a/vendor/github.com/hashicorp/nomad/api/allocations.go +++ b/vendor/github.com/hashicorp/nomad/api/allocations.go @@ -30,6 +30,7 @@ const ( AllocClientStatusComplete = "complete" AllocClientStatusFailed = "failed" AllocClientStatusLost = "lost" + AllocClientStatusUnknown = "unknown" ) const ( @@ -270,6 +271,7 @@ type Allocation struct { PreviousAllocation string NextAllocation string RescheduleTracker *RescheduleTracker + NetworkStatus *AllocNetworkStatus PreemptedAllocations []string PreemptedByAllocation string CreateIndex uint64 @@ -283,6 +285,7 @@ type Allocation struct { type AllocationMetric struct { NodesEvaluated int NodesFiltered int + NodesInPool int NodesAvailable map[string]int ClassFiltered map[string]int ConstraintFiltered map[string]int @@ -326,6 +329,7 @@ func (a *Allocation) Stub() *AllocationListStub { TaskStates: a.TaskStates, DeploymentStatus: a.DeploymentStatus, FollowupEvalID: a.FollowupEvalID, + NextAllocation: a.NextAllocation, RescheduleTracker: a.RescheduleTracker, PreemptedAllocations: a.PreemptedAllocations, PreemptedByAllocation: a.PreemptedByAllocation, @@ -379,6 +383,7 @@ type AllocationListStub struct { TaskStates map[string]*TaskState DeploymentStatus *AllocDeploymentStatus FollowupEvalID string + NextAllocation string RescheduleTracker *RescheduleTracker PreemptedAllocations []string PreemptedByAllocation string @@ -398,6 +403,15 @@ type AllocDeploymentStatus struct { ModifyIndex uint64 } +// AllocNetworkStatus captures the status of an allocation's network during runtime. +// Depending on the network mode, an allocation's address may need to be known to other +// systems in Nomad such as service registration. 
+type AllocNetworkStatus struct { + InterfaceName string + Address string + DNS *DNSConfig +} + type AllocatedResources struct { Tasks map[string]*AllocatedTaskResources Shared AllocatedSharedResources diff --git a/vendor/github.com/hashicorp/nomad/api/api.go b/vendor/github.com/hashicorp/nomad/api/api.go index 1bf97f05f..ac755e254 100644 --- a/vendor/github.com/hashicorp/nomad/api/api.go +++ b/vendor/github.com/hashicorp/nomad/api/api.go @@ -895,13 +895,16 @@ func (c *Client) websocket(endpoint string, q *QueryOptions) (*websocket.Conn, * conn, resp, err := dialer.Dial(rhttp.URL.String(), rhttp.Header) // check resp status code, as it's more informative than handshake error we get from ws library - if resp != nil && resp.StatusCode != 101 { + if resp != nil && resp.StatusCode != http.StatusSwitchingProtocols { var buf bytes.Buffer if resp.Header.Get("Content-Encoding") == "gzip" { greader, err := gzip.NewReader(resp.Body) if err != nil { - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + return nil, nil, newUnexpectedResponseError( + fromStatusCode(resp.StatusCode), + withExpectedStatuses([]int{http.StatusSwitchingProtocols}), + withError(err)) } io.Copy(&buf, greader) } else { @@ -909,7 +912,11 @@ func (c *Client) websocket(endpoint string, q *QueryOptions) (*websocket.Conn, * } resp.Body.Close() - return nil, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + return nil, nil, newUnexpectedResponseError( + fromStatusCode(resp.StatusCode), + withExpectedStatuses([]int{http.StatusSwitchingProtocols}), + withBody(fmt.Sprint(buf.Bytes())), + ) } return conn, resp, err @@ -1129,24 +1136,6 @@ func encodeBody(obj interface{}) (io.Reader, error) { return buf, nil } -// requireOK is used to wrap doRequest and check for a 200 -func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - if e != nil { - if resp != nil { - resp.Body.Close() - } - return d, nil, e - } - if resp.StatusCode != 200 { - var buf bytes.Buffer - _, _ = io.Copy(&buf, resp.Body) - _ = resp.Body.Close() - body := strings.TrimSpace(buf.String()) - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, body) - } - return d, resp, nil -} - // Context returns the context used for canceling HTTP requests related to this query func (o *QueryOptions) Context() context.Context { if o != nil && o.ctx != nil { diff --git a/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go b/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go index 2ce523a72..5176f5b82 100644 --- a/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go +++ b/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go @@ -15,6 +15,7 @@ const ( Evals Context = "evals" Jobs Context = "jobs" Nodes Context = "nodes" + NodePools Context = "node_pools" Namespaces Context = "namespaces" Quotas Context = "quotas" Recommendations Context = "recommendations" diff --git a/vendor/github.com/hashicorp/nomad/api/error_unexpected_response.go b/vendor/github.com/hashicorp/nomad/api/error_unexpected_response.go new file mode 100644 index 000000000..b843fc7ab --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/error_unexpected_response.go @@ -0,0 +1,178 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strings" + "time" + + "golang.org/x/exp/slices" +) + +// UnexpectedResponseError tracks the components for API errors encountered when +// requireOK and requireStatusIn's conditions are not met. +type UnexpectedResponseError struct { + expected []int + statusCode int + statusText string + body string + err error + additional error +} + +func (e UnexpectedResponseError) HasExpectedStatuses() bool { return len(e.expected) > 0 } +func (e UnexpectedResponseError) ExpectedStatuses() []int { return e.expected } +func (e UnexpectedResponseError) HasStatusCode() bool { return e.statusCode != 0 } +func (e UnexpectedResponseError) StatusCode() int { return e.statusCode } +func (e UnexpectedResponseError) HasStatusText() bool { return e.statusText != "" } +func (e UnexpectedResponseError) StatusText() string { return e.statusText } +func (e UnexpectedResponseError) HasBody() bool { return e.body != "" } +func (e UnexpectedResponseError) Body() string { return e.body } +func (e UnexpectedResponseError) HasError() bool { return e.err != nil } +func (e UnexpectedResponseError) Unwrap() error { return e.err } +func (e UnexpectedResponseError) HasAdditional() bool { return e.additional != nil } +func (e UnexpectedResponseError) Additional() error { return e.additional } +func newUnexpectedResponseError(src unexpectedResponseErrorSource, opts ...unexpectedResponseErrorOption) UnexpectedResponseError { + nErr := src() + for _, opt := range opts { + opt(nErr) + } + if nErr.statusText == "" { + // the stdlib's http.StatusText function is a good place to start + nErr.statusFromCode(http.StatusText) + } + + return *nErr +} + +// Use textual representation of the given integer code. Called when status text +// is not set using the WithStatusText option. +func (e UnexpectedResponseError) statusFromCode(f func(int) string) { + e.statusText = f(e.statusCode) + if !e.HasStatusText() { + e.statusText = "unknown status code" + } +} + +func (e UnexpectedResponseError) Error() string { + var eTxt strings.Builder + eTxt.WriteString("Unexpected response code") + if e.HasBody() || e.HasStatusCode() { + eTxt.WriteString(": ") + } + if e.HasStatusCode() { + eTxt.WriteString(fmt.Sprint(e.statusCode)) + if e.HasBody() { + eTxt.WriteRune(' ') + } + } + if e.HasBody() { + eTxt.WriteString(fmt.Sprintf("(%s)", e.body)) + } + + if e.HasAdditional() { + eTxt.WriteString(fmt.Sprintf(". Additionally, an error occurred while constructing this error (%s); the body might be truncated or missing.", e.additional.Error())) + } + + return eTxt.String() +} + +// UnexpectedResponseErrorOptions are functions passed to NewUnexpectedResponseError +// to customize the created error. +type unexpectedResponseErrorOption func(*UnexpectedResponseError) + +// withError allows the addition of a Go error that may have been encountered +// while processing the response. For example, if there is an error constructing +// the gzip reader to process a gzip-encoded response body. 
+func withError(e error) unexpectedResponseErrorOption { + return func(u *UnexpectedResponseError) { u.err = e } +} + +// withBody overwrites the Body value with the provided custom value +func withBody(b string) unexpectedResponseErrorOption { + return func(u *UnexpectedResponseError) { u.body = b } +} + +// withStatusText overwrites the StatusText value the provided custom value +func withStatusText(st string) unexpectedResponseErrorOption { + return func(u *UnexpectedResponseError) { u.statusText = st } +} + +// withExpectedStatuses provides a list of statuses that the receiving function +// expected to receive. This can be used by API callers to provide more feedback +// to end-users. +func withExpectedStatuses(s []int) unexpectedResponseErrorOption { + return func(u *UnexpectedResponseError) { u.expected = slices.Clone(s) } +} + +// unexpectedResponseErrorSource provides the basis for a NewUnexpectedResponseError. +type unexpectedResponseErrorSource func() *UnexpectedResponseError + +// fromHTTPResponse read an open HTTP response, drains and closes its body as +// the data for the UnexpectedResponseError. +func fromHTTPResponse(resp *http.Response) unexpectedResponseErrorSource { + return func() *UnexpectedResponseError { + u := new(UnexpectedResponseError) + + if resp != nil { + // collect and close the body + var buf bytes.Buffer + if _, e := io.Copy(&buf, resp.Body); e != nil { + u.additional = e + } + + // Body has been tested as safe to close more than once + _ = resp.Body.Close() + body := strings.TrimSpace(buf.String()) + + // make and return the error + u.statusCode = resp.StatusCode + u.statusText = strings.TrimSpace(strings.TrimPrefix(resp.Status, fmt.Sprint(resp.StatusCode))) + u.body = body + } + return u + } +} + +// fromStatusCode attempts to resolve the status code to status text using +// the resolving function provided inside of the NewUnexpectedResponseError +// implementation. 
+func fromStatusCode(sc int) unexpectedResponseErrorSource { + return func() *UnexpectedResponseError { return &UnexpectedResponseError{statusCode: sc} } +} + +// doRequestWrapper is a function that wraps the client's doRequest method +// and can be used to provide error and response handling +type doRequestWrapper = func(time.Duration, *http.Response, error) (time.Duration, *http.Response, error) + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + f := requireStatusIn(http.StatusOK) + return f(d, resp, e) +} + +// requireStatusIn is a doRequestWrapper generator that takes expected HTTP +// response codes and validates that the received response code is among them +func requireStatusIn(statuses ...int) doRequestWrapper { + return func(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + _ = resp.Body.Close() + } + return d, nil, e + } + + for _, status := range statuses { + if resp.StatusCode == status { + return d, resp, nil + } + } + + return d, nil, newUnexpectedResponseError(fromHTTPResponse(resp), withExpectedStatuses(statuses)) + } +} diff --git a/vendor/github.com/hashicorp/nomad/api/event_stream.go b/vendor/github.com/hashicorp/nomad/api/event_stream.go index 60e08fbe1..7721d15cc 100644 --- a/vendor/github.com/hashicorp/nomad/api/event_stream.go +++ b/vendor/github.com/hashicorp/nomad/api/event_stream.go @@ -19,6 +19,7 @@ const ( TopicAllocation Topic = "Allocation" TopicJob Topic = "Job" TopicNode Topic = "Node" + TopicNodePool Topic = "NodePool" TopicService Topic = "Service" TopicAll Topic = "*" ) @@ -99,6 +100,16 @@ func (e *Event) Node() (*Node, error) { return out.Node, nil } +// NodePool returns a NodePool struct from a given event payload. If the Event +// Topic is NodePool this will return a valid NodePool. +func (e *Event) NodePool() (*NodePool, error) { + out, err := e.decodePayload() + if err != nil { + return nil, err + } + return out.NodePool, nil +} + // Service returns a ServiceRegistration struct from a given event payload. If // the Event Topic is Service this will return a valid ServiceRegistration. func (e *Event) Service() (*ServiceRegistration, error) { @@ -115,6 +126,7 @@ type eventPayload struct { Evaluation *Evaluation `mapstructure:"Evaluation"` Job *Job `mapstructure:"Job"` Node *Node `mapstructure:"Node"` + NodePool *NodePool `mapstructure:"NodePool"` Service *ServiceRegistration `mapstructure:"Service"` } diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go index 64b25c710..3b60f695b 100644 --- a/vendor/github.com/hashicorp/nomad/api/jobs.go +++ b/vendor/github.com/hashicorp/nomad/api/jobs.go @@ -29,6 +29,9 @@ const ( // on all clients. JobTypeSysbatch = "sysbatch" + // JobDefaultPriority is the default priority if not specified. + JobDefaultPriority = 50 + // PeriodicSpecCron is used for a cron spec. 
PeriodicSpecCron = "cron" @@ -444,6 +447,9 @@ func (j *Jobs) PlanOpts(job *Job, opts *PlanOptions, q *WriteOptions) (*JobPlanR if job == nil { return nil, nil, errors.New("must pass non-nil job") } + if job.ID == nil { + return nil, nil, errors.New("job is missing ID") + } // Setup the request req := &JobPlanRequest{ @@ -790,9 +796,11 @@ func (m *Multiregion) Copy() *Multiregion { copyRegion.Name = region.Name copyRegion.Count = pointerOf(*region.Count) copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...) + copyRegion.NodePool = region.NodePool for k, v := range region.Meta { copyRegion.Meta[k] = v } + copy.Regions = append(copy.Regions, copyRegion) } return copy @@ -807,6 +815,7 @@ type MultiregionRegion struct { Name string `hcl:",label"` Count *int `hcl:"count,optional"` Datacenters []string `hcl:"datacenters,optional"` + NodePool string `hcl:"node_pool,optional"` Meta map[string]string `hcl:"meta,block"` } @@ -940,6 +949,7 @@ type Job struct { Priority *int `hcl:"priority,optional"` AllAtOnce *bool `mapstructure:"all_at_once" hcl:"all_at_once,optional"` Datacenters []string `hcl:"datacenters,optional"` + NodePool *string `hcl:"node_pool,optional"` Constraints []*Constraint `hcl:"constraint,block"` Affinities []*Affinity `hcl:"affinity,block"` TaskGroups []*TaskGroup `hcl:"group,block"` @@ -1003,7 +1013,7 @@ func (j *Job) Canonicalize() { j.Namespace = pointerOf(DefaultNamespace) } if j.Priority == nil { - j.Priority = pointerOf(0) + j.Priority = pointerOf(JobDefaultPriority) } if j.Stop == nil { j.Stop = pointerOf(false) @@ -1011,8 +1021,8 @@ func (j *Job) Canonicalize() { if j.Region == nil { j.Region = pointerOf(GlobalRegion) } - if j.Namespace == nil { - j.Namespace = pointerOf("default") + if j.NodePool == nil { + j.NodePool = pointerOf("") } if j.Type == nil { j.Type = pointerOf("service") diff --git a/vendor/github.com/hashicorp/nomad/api/namespace.go b/vendor/github.com/hashicorp/nomad/api/namespace.go index 80f9fe88e..d1b4fbbee 100644 --- a/vendor/github.com/hashicorp/nomad/api/namespace.go +++ b/vendor/github.com/hashicorp/nomad/api/namespace.go @@ -70,20 +70,31 @@ func (n *Namespaces) Delete(namespace string, q *WriteOptions) (*WriteMeta, erro // Namespace is used to serialize a namespace. type Namespace struct { - Name string - Description string - Quota string - Capabilities *NamespaceCapabilities `hcl:"capabilities,block"` - Meta map[string]string - CreateIndex uint64 - ModifyIndex uint64 + Name string + Description string + Quota string + Capabilities *NamespaceCapabilities `hcl:"capabilities,block"` + NodePoolConfiguration *NamespaceNodePoolConfiguration `hcl:"node_pool_config,block"` + Meta map[string]string + CreateIndex uint64 + ModifyIndex uint64 } +// NamespaceCapabilities represents a set of capabilities allowed for this +// namespace, to be checked at job submission time. type NamespaceCapabilities struct { EnabledTaskDrivers []string `hcl:"enabled_task_drivers"` DisabledTaskDrivers []string `hcl:"disabled_task_drivers"` } +// NamespaceNodePoolConfiguration stores configuration about node pools for a +// namespace. +type NamespaceNodePoolConfiguration struct { + Default string + Allowed []string + Denied []string +} + // NamespaceIndexSort is a wrapper to sort Namespaces by CreateIndex. We // reverse the test so that we get the highest index first. 
type NamespaceIndexSort []*Namespace diff --git a/vendor/github.com/hashicorp/nomad/api/node_pools.go b/vendor/github.com/hashicorp/nomad/api/node_pools.go new file mode 100644 index 000000000..d3f18bd87 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/node_pools.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "errors" + "fmt" + "net/url" +) + +const ( + // NodePoolAll is the node pool that always includes all nodes. + NodePoolAll = "all" + + // NodePoolDefault is the default node pool. + NodePoolDefault = "default" +) + +// NodePools is used to access node pools endpoints. +type NodePools struct { + client *Client +} + +// NodePools returns a handle on the node pools endpoints. +func (c *Client) NodePools() *NodePools { + return &NodePools{client: c} +} + +// List is used to list all node pools. +func (n *NodePools) List(q *QueryOptions) ([]*NodePool, *QueryMeta, error) { + var resp []*NodePool + qm, err := n.client.query("/v1/node/pools", &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// PrefixList is used to list node pools that match a given prefix. +func (n *NodePools) PrefixList(prefix string, q *QueryOptions) ([]*NodePool, *QueryMeta, error) { + if q == nil { + q = &QueryOptions{} + } + q.Prefix = prefix + return n.List(q) +} + +// Info is used to fetch details of a specific node pool. +func (n *NodePools) Info(name string, q *QueryOptions) (*NodePool, *QueryMeta, error) { + if name == "" { + return nil, nil, errors.New("missing node pool name") + } + + var resp NodePool + qm, err := n.client.query("/v1/node/pool/"+url.PathEscape(name), &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// Register is used to create or update a node pool. +func (n *NodePools) Register(pool *NodePool, w *WriteOptions) (*WriteMeta, error) { + if pool == nil { + return nil, errors.New("missing node pool") + } + if pool.Name == "" { + return nil, errors.New("missing node pool name") + } + + wm, err := n.client.put("/v1/node/pools", pool, nil, w) + if err != nil { + return nil, err + } + return wm, nil +} + +// Delete is used to delete a node pool. +func (n *NodePools) Delete(name string, w *WriteOptions) (*WriteMeta, error) { + if name == "" { + return nil, errors.New("missing node pool name") + } + + wm, err := n.client.delete("/v1/node/pool/"+url.PathEscape(name), nil, nil, w) + if err != nil { + return nil, err + } + return wm, nil +} + +// ListJobs is used to list all the jobs in a node pool. +func (n *NodePools) ListJobs(poolName string, q *QueryOptions) ([]*JobListStub, *QueryMeta, error) { + var resp []*JobListStub + qm, err := n.client.query( + fmt.Sprintf("/v1/node/pool/%s/jobs", url.PathEscape(poolName)), + &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// ListNodes is used to list all the nodes in a node pool. +func (n *NodePools) ListNodes(poolName string, q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) { + var resp []*NodeListStub + qm, err := n.client.query( + fmt.Sprintf("/v1/node/pool/%s/nodes", url.PathEscape(poolName)), + &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// NodePool is used to serialize a node pool. 
+type NodePool struct { + Name string `hcl:"name,label"` + Description string `hcl:"description,optional"` + Meta map[string]string `hcl:"meta,block"` + SchedulerConfiguration *NodePoolSchedulerConfiguration `hcl:"scheduler_config,block"` + CreateIndex uint64 + ModifyIndex uint64 +} + +// NodePoolSchedulerConfiguration is used to serialize the scheduler +// configuration of a node pool. +type NodePoolSchedulerConfiguration struct { + SchedulerAlgorithm SchedulerAlgorithm `hcl:"scheduler_algorithm,optional"` + MemoryOversubscriptionEnabled *bool `hcl:"memory_oversubscription_enabled,optional"` +} diff --git a/vendor/github.com/hashicorp/nomad/api/nodes.go b/vendor/github.com/hashicorp/nomad/api/nodes.go index dfc5646be..697c7d731 100644 --- a/vendor/github.com/hashicorp/nomad/api/nodes.go +++ b/vendor/github.com/hashicorp/nomad/api/nodes.go @@ -553,6 +553,7 @@ type Node struct { Links map[string]string Meta map[string]string NodeClass string + NodePool string CgroupParent string Drain bool DrainStrategy *DrainStrategy @@ -914,6 +915,7 @@ type NodeListStub struct { Datacenter string Name string NodeClass string + NodePool string Version string Drain bool SchedulingEligibility string diff --git a/vendor/github.com/hashicorp/nomad/api/operator.go b/vendor/github.com/hashicorp/nomad/api/operator.go index ba8d41cec..32faf3546 100644 --- a/vendor/github.com/hashicorp/nomad/api/operator.go +++ b/vendor/github.com/hashicorp/nomad/api/operator.go @@ -6,8 +6,8 @@ package api import ( "encoding/json" "errors" - "fmt" "io" + "net/http" "strconv" "strings" "time" @@ -341,13 +341,15 @@ func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, *QueryMeta, erro } defer resp.Body.Close() - if resp.StatusCode == 204 { + if resp.StatusCode == http.StatusNoContent { return nil, nil, errors.New("Nomad Enterprise only endpoint") } - if resp.StatusCode != 200 { - body, _ := io.ReadAll(resp.Body) - return nil, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, body) + if resp.StatusCode != http.StatusOK { + return nil, nil, newUnexpectedResponseError( + fromHTTPResponse(resp), + withExpectedStatuses([]int{http.StatusOK, http.StatusNoContent}), + ) } err = json.NewDecoder(resp.Body).Decode(&reply) diff --git a/vendor/github.com/hashicorp/nomad/api/services.go b/vendor/github.com/hashicorp/nomad/api/services.go index 450236547..95f027810 100644 --- a/vendor/github.com/hashicorp/nomad/api/services.go +++ b/vendor/github.com/hashicorp/nomad/api/services.go @@ -212,6 +212,7 @@ type ServiceCheck struct { Interval time.Duration `hcl:"interval,optional"` Timeout time.Duration `hcl:"timeout,optional"` InitialStatus string `mapstructure:"initial_status" hcl:"initial_status,optional"` + TLSServerName string `mapstructure:"tls_server_name" hcl:"tls_server_name,optional"` TLSSkipVerify bool `mapstructure:"tls_skip_verify" hcl:"tls_skip_verify,optional"` Header map[string][]string `hcl:"header,block"` Method string `hcl:"method,optional"` diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go index e928b3a04..188fa8649 100644 --- a/vendor/github.com/hashicorp/nomad/api/tasks.go +++ b/vendor/github.com/hashicorp/nomad/api/tasks.go @@ -641,12 +641,19 @@ func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup { type LogConfig struct { MaxFiles *int `mapstructure:"max_files" hcl:"max_files,optional"` MaxFileSizeMB *int `mapstructure:"max_file_size" hcl:"max_file_size,optional"` + + // COMPAT(1.6.0): Enabled had to be swapped for Disabled to fix a backwards + 
// compatibility bug when restoring pre-1.5.4 jobs. Remove in 1.6.0 + Enabled *bool `mapstructure:"enabled" hcl:"enabled,optional"` + + Disabled *bool `mapstructure:"disabled" hcl:"disabled,optional"` } func DefaultLogConfig() *LogConfig { return &LogConfig{ MaxFiles: pointerOf(10), MaxFileSizeMB: pointerOf(10), + Disabled: pointerOf(false), } } @@ -657,6 +664,9 @@ func (l *LogConfig) Canonicalize() { if l.MaxFileSizeMB == nil { l.MaxFileSizeMB = pointerOf(10) } + if l.Disabled == nil { + l.Disabled = pointerOf(false) + } } // DispatchPayloadConfig configures how a task gets its input from a job dispatch @@ -909,6 +919,7 @@ type Vault struct { Policies []string `hcl:"policies,optional"` Namespace *string `mapstructure:"namespace" hcl:"namespace,optional"` Env *bool `hcl:"env,optional"` + DisableFile *bool `mapstructure:"disable_file" hcl:"disable_file,optional"` ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"` ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"` } @@ -917,6 +928,9 @@ func (v *Vault) Canonicalize() { if v.Env == nil { v.Env = pointerOf(true) } + if v.DisableFile == nil { + v.DisableFile = pointerOf(false) + } if v.Namespace == nil { v.Namespace = pointerOf("") } diff --git a/vendor/github.com/hashicorp/nomad/api/variables.go b/vendor/github.com/hashicorp/nomad/api/variables.go index 91dc13cf4..86458c13a 100644 --- a/vendor/github.com/hashicorp/nomad/api/variables.go +++ b/vendor/github.com/hashicorp/nomad/api/variables.go @@ -4,14 +4,11 @@ package api import ( - "bytes" "encoding/json" "errors" "fmt" - "io" "net/http" "strings" - "time" ) const ( @@ -457,39 +454,3 @@ type ErrCASConflict struct { func (e ErrCASConflict) Error() string { return fmt.Sprintf("cas conflict: expected ModifyIndex %v; found %v", e.CheckIndex, e.Conflict.ModifyIndex) } - -// doRequestWrapper is a function that wraps the client's doRequest method -// and can be used to provide error and response handling -type doRequestWrapper = func(time.Duration, *http.Response, error) (time.Duration, *http.Response, error) - -// requireStatusIn is a doRequestWrapper generator that takes expected HTTP -// response codes and validates that the received response code is among them -func requireStatusIn(statuses ...int) doRequestWrapper { - fn := func(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - if e != nil { - if resp != nil { - _ = resp.Body.Close() - } - return d, nil, e - } - - for _, status := range statuses { - if resp.StatusCode == status { - return d, resp, nil - } - } - - return d, nil, generateUnexpectedResponseCodeError(resp) - } - return fn -} - -// generateUnexpectedResponseCodeError creates a standardized error -// when the the API client's newRequest method receives an unexpected -// HTTP response code when accessing the variable's HTTP API -func generateUnexpectedResponseCodeError(resp *http.Response) error { - var buf bytes.Buffer - _, _ = io.Copy(&buf, resp.Body) - _ = resp.Body.Close() - return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) -} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server_type.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server_type.go deleted file mode 100644 index e2fe2f726..000000000 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server_type.go +++ /dev/null @@ -1,27 +0,0 @@ -package schema - -// ServerType defines the schema of a server type. 
-type ServerType struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Cores int `json:"cores"` - Memory float32 `json:"memory"` - Disk int `json:"disk"` - StorageType string `json:"storage_type"` - CPUType string `json:"cpu_type"` - Architecture string `json:"architecture"` - Prices []PricingServerTypePrice `json:"prices"` -} - -// ServerTypeListResponse defines the schema of the response when -// listing server types. -type ServerTypeListResponse struct { - ServerTypes []ServerType `json:"server_types"` -} - -// ServerTypeGetResponse defines the schema of the response when -// retrieving a single server type. -type ServerTypeGetResponse struct { - ServerType ServerType `json:"server_type"` -} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/testing.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/testing.go deleted file mode 100644 index a3cfb2f0c..000000000 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/testing.go +++ /dev/null @@ -1,18 +0,0 @@ -package hcloud - -import ( - "testing" - "time" -) - -const apiTimestampFormat = "2006-01-02T15:04:05-07:00" - -func mustParseTime(t *testing.T, layout, value string) time.Time { - t.Helper() - - ts, err := time.Parse(layout, value) - if err != nil { - t.Fatalf("parse time: layout %v: value %v: %v", layout, value, err) - } - return ts -} diff --git a/vendor/github.com/hetznercloud/hcloud-go/LICENSE b/vendor/github.com/hetznercloud/hcloud-go/v2/LICENSE similarity index 100% rename from vendor/github.com/hetznercloud/hcloud-go/LICENSE rename to vendor/github.com/hetznercloud/hcloud-go/v2/LICENSE diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action.go similarity index 89% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action.go index e915d7524..e4cf712b0 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/action.go @@ -6,12 +6,12 @@ import ( "net/url" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // Action represents an action in the Hetzner Cloud. type Action struct { - ID int + ID int64 Status ActionStatus Command string Progress int @@ -34,7 +34,7 @@ const ( // ActionResource references other resources from an action. type ActionResource struct { - ID int + ID int64 Type ActionResourceType } @@ -76,7 +76,7 @@ type ActionClient struct { } // GetByID retrieves an action by its ID. If the action does not exist, nil is returned. -func (c *ActionClient) GetByID(ctx context.Context, id int) (*Action, *Response, error) { +func (c *ActionClient) GetByID(ctx context.Context, id int64) (*Action, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/actions/%d", id), nil) if err != nil { return nil, nil, err @@ -96,13 +96,13 @@ func (c *ActionClient) GetByID(ctx context.Context, id int) (*Action, *Response, // ActionListOpts specifies options for listing actions. type ActionListOpts struct { ListOpts - ID []int + ID []int64 Status []ActionStatus Sort []string } func (l ActionListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() for _, id := range l.ID { vals.Add("id", fmt.Sprintf("%d", id)) } @@ -140,30 +140,12 @@ func (c *ActionClient) List(ctx context.Context, opts ActionListOpts) ([]*Action // All returns all actions. 
func (c *ActionClient) All(ctx context.Context) ([]*Action, error) { - allActions := []*Action{} - - opts := ActionListOpts{} - opts.PerPage = 50 - - err := c.client.all(func(page int) (*Response, error) { - opts.Page = page - actions, resp, err := c.List(ctx, opts) - if err != nil { - return resp, err - } - allActions = append(allActions, actions...) - return resp, nil - }) - if err != nil { - return nil, err - } - - return allActions, nil + return c.AllWithOpts(ctx, ActionListOpts{ListOpts: ListOpts{PerPage: 50}}) } // AllWithOpts returns all actions for the given options. func (c *ActionClient) AllWithOpts(ctx context.Context, opts ActionListOpts) ([]*Action, error) { - allActions := []*Action{} + var allActions []*Action err := c.client.all(func(page int) (*Response, error) { opts.Page = page @@ -192,7 +174,7 @@ func (c *ActionClient) AllWithOpts(ctx context.Context, opts ActionListOpts) ([] // complete successfully, as well as any errors that happened while // querying the API. // -// By default the method keeps watching until all actions have finished +// By default, the method keeps watching until all actions have finished // processing. If you want to be able to cancel the method or configure a // timeout, use the [context.Context]. Once the method has stopped watching, // both returned channels are closed. @@ -207,8 +189,8 @@ func (c *ActionClient) WatchOverallProgress(ctx context.Context, actions []*Acti defer close(errCh) defer close(progressCh) - successIDs := make([]int, 0, len(actions)) - watchIDs := make(map[int]struct{}, len(actions)) + successIDs := make([]int64, 0, len(actions)) + watchIDs := make(map[int64]struct{}, len(actions)) for _, action := range actions { watchIDs[action.ID] = struct{}{} } @@ -241,7 +223,7 @@ func (c *ActionClient) WatchOverallProgress(ctx context.Context, actions []*Acti continue case ActionStatusSuccess: delete(watchIDs, a.ID) - successIDs := append(successIDs, a.ID) + successIDs = append(successIDs, a.ID) sendProgress(progressCh, int(float64(len(actions)-len(successIDs))/float64(len(actions))*100)) case ActionStatusError: delete(watchIDs, a.ID) @@ -269,7 +251,7 @@ func (c *ActionClient) WatchOverallProgress(ctx context.Context, actions []*Acti // API, as well as the error of the action if it did not complete // successfully, or nil if it did. // -// By default the method keeps watching until the action has finished +// By default, the method keeps watching until the action has finished // processing. If you want to be able to cancel the method or configure a // timeout, use the [context.Context]. Once the method has stopped watching, // both returned channels are closed. 
diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/architecture.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/architecture.go similarity index 100% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/architecture.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/architecture.go diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/certificate.go similarity index 92% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/certificate.go index 627855231..3251cb7f7 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/certificate.go @@ -10,7 +10,7 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // CertificateType is the type of available certificate types. @@ -50,7 +50,7 @@ const ( // CertificateUsedByRef points to a resource that uses this certificate. type CertificateUsedByRef struct { - ID int + ID int64 Type CertificateUsedByRefType } @@ -68,9 +68,9 @@ func (st *CertificateStatus) IsFailed() bool { return st.Issuance == CertificateStatusTypeFailed || st.Renewal == CertificateStatusTypeFailed } -// Certificate represents an certificate in the Hetzner Cloud. +// Certificate represents a certificate in the Hetzner Cloud. type Certificate struct { - ID int + ID int64 Name string Labels map[string]string Type CertificateType @@ -96,7 +96,7 @@ type CertificateClient struct { } // GetByID retrieves a Certificate by its ID. If the Certificate does not exist, nil is returned. -func (c *CertificateClient) GetByID(ctx context.Context, id int) (*Certificate, *Response, error) { +func (c *CertificateClient) GetByID(ctx context.Context, id int64) (*Certificate, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/certificates/%d", id), nil) if err != nil { return nil, nil, err @@ -128,8 +128,8 @@ func (c *CertificateClient) GetByName(ctx context.Context, name string) (*Certif // Get retrieves a Certificate by its ID if the input can be parsed as an integer, otherwise it // retrieves a Certificate by its name. If the Certificate does not exist, nil is returned. func (c *CertificateClient) Get(ctx context.Context, idOrName string) (*Certificate, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -142,7 +142,7 @@ type CertificateListOpts struct { } func (l CertificateListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -177,25 +177,7 @@ func (c *CertificateClient) List(ctx context.Context, opts CertificateListOpts) // All returns all Certificates. func (c *CertificateClient) All(ctx context.Context) ([]*Certificate, error) { - allCertificates := []*Certificate{} - - opts := CertificateListOpts{} - opts.PerPage = 50 - - err := c.client.all(func(page int) (*Response, error) { - opts.Page = page - Certificate, resp, err := c.List(ctx, opts) - if err != nil { - return resp, err - } - allCertificates = append(allCertificates, Certificate...) 
- return resp, nil - }) - if err != nil { - return nil, err - } - - return allCertificates, nil + return c.AllWithOpts(ctx, CertificateListOpts{ListOpts: ListOpts{PerPage: 50}}) } // AllWithOpts returns all Certificates for the given options. @@ -260,7 +242,7 @@ func (o CertificateCreateOpts) validateUploaded() error { return nil } -// Create creates a new certificate uploaded certificate. +// Create creates a new uploaded certificate. // // Create returns an error for certificates of any other type. Use // CreateCertificate to create such certificates. diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/client.go similarity index 98% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/client.go index f3b5a833f..0f6e2c86b 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/client.go @@ -18,8 +18,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/http/httpguts" - "github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/internal/instrumentation" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // Endpoint is the base URL of the API. @@ -445,7 +445,8 @@ type ListOpts struct { LabelSelector string // Label selector for filtering by labels } -func (l ListOpts) values() url.Values { +// Values returns the ListOpts as URL values. +func (l ListOpts) Values() url.Values { vals := url.Values{} if l.Page > 0 { vals.Add("page", strconv.Itoa(l.Page)) diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/datacenter.go similarity index 82% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/datacenter.go index 5a841f945..b899eec32 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/datacenter.go @@ -6,12 +6,12 @@ import ( "net/url" "strconv" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // Datacenter represents a datacenter in the Hetzner Cloud. type Datacenter struct { - ID int + ID int64 Name string Description string Location *Location @@ -30,7 +30,7 @@ type DatacenterClient struct { } // GetByID retrieves a datacenter by its ID. If the datacenter does not exist, nil is returned. -func (c *DatacenterClient) GetByID(ctx context.Context, id int) (*Datacenter, *Response, error) { +func (c *DatacenterClient) GetByID(ctx context.Context, id int64) (*Datacenter, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/datacenters/%d", id), nil) if err != nil { return nil, nil, err @@ -47,7 +47,7 @@ func (c *DatacenterClient) GetByID(ctx context.Context, id int) (*Datacenter, *R return DatacenterFromSchema(body.Datacenter), resp, nil } -// GetByName retrieves an datacenter by its name. If the datacenter does not exist, nil is returned. +// GetByName retrieves a datacenter by its name. If the datacenter does not exist, nil is returned. 
func (c *DatacenterClient) GetByName(ctx context.Context, name string) (*Datacenter, *Response, error) { if name == "" { return nil, nil, nil @@ -62,8 +62,8 @@ func (c *DatacenterClient) GetByName(ctx context.Context, name string) (*Datacen // Get retrieves a datacenter by its ID if the input can be parsed as an integer, otherwise it // retrieves a datacenter by its name. If the datacenter does not exist, nil is returned. func (c *DatacenterClient) Get(ctx context.Context, idOrName string) (*Datacenter, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -76,7 +76,7 @@ type DatacenterListOpts struct { } func (l DatacenterListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -111,10 +111,12 @@ func (c *DatacenterClient) List(ctx context.Context, opts DatacenterListOpts) ([ // All returns all datacenters. func (c *DatacenterClient) All(ctx context.Context) ([]*Datacenter, error) { - allDatacenters := []*Datacenter{} + return c.AllWithOpts(ctx, DatacenterListOpts{ListOpts: ListOpts{PerPage: 50}}) +} - opts := DatacenterListOpts{} - opts.PerPage = 50 +// AllWithOpts returns all datacenters for the given options. +func (c *DatacenterClient) AllWithOpts(ctx context.Context, opts DatacenterListOpts) ([]*Datacenter, error) { + var allDatacenters []*Datacenter err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/deprecation.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/deprecation.go new file mode 100644 index 000000000..17c6949cb --- /dev/null +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/deprecation.go @@ -0,0 +1,59 @@ +package hcloud + +import "time" + +// Deprecatable is a shared interface implemented by all Resources that have a defined deprecation workflow. +type Deprecatable interface { + // IsDeprecated returns true if the resource is marked as deprecated. + IsDeprecated() bool + + // UnavailableAfter returns the time that the deprecated resource will be removed from the API. + // This only returns a valid value if [Deprecatable.IsDeprecated] returned true. + UnavailableAfter() time.Time + + // DeprecationAnnounced returns the time that the deprecation of this resource was announced. + // This only returns a valid value if [Deprecatable.IsDeprecated] returned true. + DeprecationAnnounced() time.Time +} + +// DeprecationInfo contains the information published when a resource is actually deprecated. +type DeprecationInfo struct { + Announced time.Time + UnavailableAfter time.Time +} + +// DeprecatableResource implements the [Deprecatable] interface and can be embedded in structs for Resources that can be +// deprecated. +type DeprecatableResource struct { + Deprecation *DeprecationInfo +} + +// IsDeprecated returns true if the resource is marked as deprecated. +func (d DeprecatableResource) IsDeprecated() bool { + return d.Deprecation != nil +} + +// UnavailableAfter returns the time that the deprecated resource will be removed from the API. +// This only returns a valid value if [Deprecatable.IsDeprecated] returned true. 
+func (d DeprecatableResource) UnavailableAfter() time.Time { + if !d.IsDeprecated() { + // Return "null" time if resource is not deprecated + return time.Unix(0, 0) + } + + return d.Deprecation.UnavailableAfter +} + +// DeprecationAnnounced returns the time that the deprecation of this resource was announced. +// This only returns a valid value if [Deprecatable.IsDeprecated] returned true. +func (d DeprecatableResource) DeprecationAnnounced() time.Time { + if !d.IsDeprecated() { + // Return "null" time if resource is not deprecated + return time.Unix(0, 0) + } + + return d.Deprecation.Announced +} + +// Make sure that all expected Resources actually implement the interface. +var _ Deprecatable = ServerType{} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/error.go similarity index 100% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/error.go diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/firewall.go similarity index 94% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/firewall.go index 514f23a4d..f0326e35b 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/firewall.go @@ -11,12 +11,12 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // Firewall represents a Firewall in the Hetzner Cloud. type Firewall struct { - ID int + ID int64 Name string Labels map[string]string Created time.Time @@ -80,7 +80,7 @@ type FirewallResource struct { // FirewallResourceServer represents a Server to apply a Firewall on. type FirewallResourceServer struct { - ID int + ID int64 } // FirewallResourceLabelSelector represents a LabelSelector to apply a Firewall on. @@ -94,7 +94,7 @@ type FirewallClient struct { } // GetByID retrieves a Firewall by its ID. If the Firewall does not exist, nil is returned. -func (c *FirewallClient) GetByID(ctx context.Context, id int) (*Firewall, *Response, error) { +func (c *FirewallClient) GetByID(ctx context.Context, id int64) (*Firewall, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/firewalls/%d", id), nil) if err != nil { return nil, nil, err @@ -126,8 +126,8 @@ func (c *FirewallClient) GetByName(ctx context.Context, name string) (*Firewall, // Get retrieves a Firewall by its ID if the input can be parsed as an integer, otherwise it // retrieves a Firewall by its name. If the Firewall does not exist, nil is returned. func (c *FirewallClient) Get(ctx context.Context, idOrName string) (*Firewall, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -140,7 +140,7 @@ type FirewallListOpts struct { } func (l FirewallListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -175,25 +175,7 @@ func (c *FirewallClient) List(ctx context.Context, opts FirewallListOpts) ([]*Fi // All returns all Firewalls. 
func (c *FirewallClient) All(ctx context.Context) ([]*Firewall, error) { - allFirewalls := []*Firewall{} - - opts := FirewallListOpts{} - opts.PerPage = 50 - - err := c.client.all(func(page int) (*Response, error) { - opts.Page = page - firewalls, resp, err := c.List(ctx, opts) - if err != nil { - return resp, err - } - allFirewalls = append(allFirewalls, firewalls...) - return resp, nil - }) - if err != nil { - return nil, err - } - - return allFirewalls, nil + return c.AllWithOpts(ctx, FirewallListOpts{ListOpts: ListOpts{PerPage: 50}}) } // AllWithOpts returns all Firewalls for the given options. diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/floating_ip.go similarity index 96% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/floating_ip.go index e2643a638..924dabbf8 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/floating_ip.go @@ -11,12 +11,12 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // FloatingIP represents a Floating IP in the Hetzner Cloud. type FloatingIP struct { - ID int + ID int64 Description string Created time.Time IP net.IP @@ -42,7 +42,7 @@ type FloatingIPProtection struct { Delete bool } -// FloatingIPType represents the type of a Floating IP. +// FloatingIPType represents the type of Floating IP. type FloatingIPType string // Floating IP types. @@ -51,7 +51,7 @@ const ( FloatingIPTypeIPv6 FloatingIPType = "ipv6" ) -// changeDNSPtr changes or resets the reverse DNS pointer for a IP address. +// changeDNSPtr changes or resets the reverse DNS pointer for an IP address. // Pass a nil ptr to reset the reverse DNS pointer to its default value. func (f *FloatingIP) changeDNSPtr(ctx context.Context, client *Client, ip net.IP, ptr *string) (*Action, *Response, error) { reqBody := schema.FloatingIPActionChangeDNSPtrRequest{ @@ -95,7 +95,7 @@ type FloatingIPClient struct { // GetByID retrieves a Floating IP by its ID. If the Floating IP does not exist, // nil is returned. -func (c *FloatingIPClient) GetByID(ctx context.Context, id int) (*FloatingIP, *Response, error) { +func (c *FloatingIPClient) GetByID(ctx context.Context, id int64) (*FloatingIP, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/floating_ips/%d", id), nil) if err != nil { return nil, nil, err @@ -127,8 +127,8 @@ func (c *FloatingIPClient) GetByName(ctx context.Context, name string) (*Floatin // Get retrieves a Floating IP by its ID if the input can be parsed as an integer, otherwise it // retrieves a Floating IP by its name. If the Floating IP does not exist, nil is returned. 
func (c *FloatingIPClient) Get(ctx context.Context, idOrName string) (*FloatingIP, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -141,7 +141,7 @@ type FloatingIPListOpts struct { } func (l FloatingIPListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -181,7 +181,7 @@ func (c *FloatingIPClient) All(ctx context.Context) ([]*FloatingIP, error) { // AllWithOpts returns all Floating IPs for the given options. func (c *FloatingIPClient) AllWithOpts(ctx context.Context, opts FloatingIPListOpts) ([]*FloatingIP, error) { - allFloatingIPs := []*FloatingIP{} + var allFloatingIPs []*FloatingIP err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/hcloud.go similarity index 72% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/hcloud.go index 7adc8745a..0131c0d92 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/hcloud.go @@ -2,4 +2,4 @@ package hcloud // Version is the library's version following Semantic Versioning. -const Version = "1.42.0" // x-release-please-version +const Version = "2.0.0" // x-release-please-version diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/helper.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/helper.go similarity index 100% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/helper.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/helper.go diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/image.go similarity index 95% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/image.go index 04d61b50c..ac844b5f5 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/image.go @@ -9,12 +9,12 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // Image represents an Image in the Hetzner Cloud. type Image struct { - ID int + ID int64 Name string Type ImageType Status ImageStatus @@ -81,7 +81,7 @@ type ImageClient struct { } // GetByID retrieves an image by its ID. If the image does not exist, nil is returned. -func (c *ImageClient) GetByID(ctx context.Context, id int) (*Image, *Response, error) { +func (c *ImageClient) GetByID(ctx context.Context, id int64) (*Image, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/images/%d", id), nil) if err != nil { return nil, nil, err @@ -132,7 +132,7 @@ func (c *ImageClient) GetByNameAndArchitecture(ctx context.Context, name string, // // Deprecated: Use [ImageClient.GetForArchitecture] instead. 
func (c *ImageClient) Get(ctx context.Context, idOrName string) (*Image, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) @@ -144,7 +144,7 @@ func (c *ImageClient) Get(ctx context.Context, idOrName string) (*Image, *Respon // In contrast to [ImageClient.Get], this method also returns deprecated images. Depending on your needs you should // check for this in your calling method. func (c *ImageClient) GetForArchitecture(ctx context.Context, idOrName string, architecture Architecture) (*Image, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { return c.GetByID(ctx, id) } return c.GetByNameAndArchitecture(ctx, idOrName, architecture) @@ -163,12 +163,12 @@ type ImageListOpts struct { } func (l ImageListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() for _, typ := range l.Type { vals.Add("type", string(typ)) } if l.BoundTo != nil { - vals.Add("bound_to", strconv.Itoa(l.BoundTo.ID)) + vals.Add("bound_to", strconv.FormatInt(l.BoundTo.ID, 10)) } if l.Name != "" { vals.Add("name", l.Name) diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation/metrics.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/internal/instrumentation/metrics.go similarity index 100% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation/metrics.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/internal/instrumentation/metrics.go diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/iso.go similarity index 80% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/iso.go index d5814cb80..9342786db 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/iso.go @@ -7,12 +7,12 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // ISO represents an ISO image in the Hetzner Cloud. type ISO struct { - ID int + ID int64 Name string Description string Type ISOType @@ -42,7 +42,7 @@ type ISOClient struct { } // GetByID retrieves an ISO by its ID. -func (c *ISOClient) GetByID(ctx context.Context, id int) (*ISO, *Response, error) { +func (c *ISOClient) GetByID(ctx context.Context, id int64) (*ISO, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/isos/%d", id), nil) if err != nil { return nil, nil, err @@ -73,8 +73,8 @@ func (c *ISOClient) GetByName(ctx context.Context, name string) (*ISO, *Response // Get retrieves an ISO by its ID if the input can be parsed as an integer, otherwise it retrieves an ISO by its name. func (c *ISOClient) Get(ctx context.Context, idOrName string) (*ISO, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -89,11 +89,15 @@ type ISOListOpts struct { Architecture []Architecture // IncludeWildcardArchitecture must be set to also return custom ISOs that have no architecture set, if you are // also setting the Architecture field. 
+ // Deprecated: Use [ISOListOpts.IncludeArchitectureWildcard] instead. IncludeWildcardArchitecture bool + // IncludeWildcardArchitecture must be set to also return custom ISOs that have no architecture set, if you are + // also setting the Architecture field. + IncludeArchitectureWildcard bool } func (l ISOListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -103,7 +107,7 @@ func (l ISOListOpts) values() url.Values { for _, arch := range l.Architecture { vals.Add("architecture", string(arch)) } - if l.IncludeWildcardArchitecture { + if l.IncludeArchitectureWildcard || l.IncludeWildcardArchitecture { vals.Add("include_architecture_wildcard", "true") } return vals @@ -134,10 +138,12 @@ func (c *ISOClient) List(ctx context.Context, opts ISOListOpts) ([]*ISO, *Respon // All returns all ISOs. func (c *ISOClient) All(ctx context.Context) ([]*ISO, error) { - allISOs := []*ISO{} + return c.AllWithOpts(ctx, ISOListOpts{ListOpts: ListOpts{PerPage: 50}}) +} - opts := ISOListOpts{} - opts.PerPage = 50 +// AllWithOpts returns all ISOs for the given options. +func (c *ISOClient) AllWithOpts(ctx context.Context, opts ISOListOpts) ([]*ISO, error) { + allISOs := make([]*ISO, 0) err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/labels.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/labels.go similarity index 87% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/labels.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/labels.go index e22000a8f..3dc7d781f 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/labels.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/labels.go @@ -6,8 +6,8 @@ import ( ) var keyRegexp = regexp.MustCompile( - `^([a-z0-9A-Z]((?:[\-_.]|[a-z0-9A-Z]){0,253}[a-z0-9A-Z])?/)?[a-z0-9A-Z]((?:[\-_.]|[a-z0-9A-Z]|){0,62}[a-z0-9A-Z])?$`) -var valueRegexp = regexp.MustCompile(`^(([a-z0-9A-Z](?:[\-_.]|[a-z0-9A-Z]){0,62})?[a-z0-9A-Z]$|$)`) + `^([a-z0-9A-Z]((?:[\-_.]|[a-z0-9A-Z]){0,253}[a-z0-9A-Z])?/)?[a-z0-9A-Z]((?:[\-_.]|[a-z0-9A-Z]|){0,61}[a-z0-9A-Z])?$`) +var valueRegexp = regexp.MustCompile(`^(([a-z0-9A-Z](?:[\-_.]|[a-z0-9A-Z]){0,61})?[a-z0-9A-Z]$|$)`) func ValidateResourceLabels(labels map[string]interface{}) (bool, error) { for k, v := range labels { diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/load_balancer.go similarity index 97% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/load_balancer.go index 81e2e03bb..124831835 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/load_balancer.go @@ -11,12 +11,12 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // LoadBalancer represents a Load Balancer in the Hetzner Cloud. type LoadBalancer struct { - ID int + ID int64 Name string PublicNet LoadBalancerPublicNet PrivateNet []LoadBalancerPrivateNet @@ -103,7 +103,7 @@ type LoadBalancerAlgorithmType string const ( // LoadBalancerAlgorithmTypeRoundRobin is an algorithm which distributes - // requests to targets in a round robin fashion. + // requests to targets in a round-robin fashion. 
LoadBalancerAlgorithmTypeRoundRobin LoadBalancerAlgorithmType = "round_robin" // LoadBalancerAlgorithmTypeLeastConnections is an algorithm which distributes // requests to targets with the least number of connections. @@ -116,7 +116,7 @@ type LoadBalancerAlgorithm struct { Type LoadBalancerAlgorithmType } -// LoadBalancerTargetType specifies the type of a Load Balancer target. +// LoadBalancerTargetType specifies the type of Load Balancer target. type LoadBalancerTargetType string const ( @@ -197,7 +197,7 @@ type LoadBalancerProtection struct { Delete bool } -// changeDNSPtr changes or resets the reverse DNS pointer for a IP address. +// changeDNSPtr changes or resets the reverse DNS pointer for an IP address. // Pass a nil ptr to reset the reverse DNS pointer to its default value. func (lb *LoadBalancer) changeDNSPtr(ctx context.Context, client *Client, ip net.IP, ptr *string) (*Action, *Response, error) { reqBody := schema.LoadBalancerActionChangeDNSPtrRequest{ @@ -241,7 +241,7 @@ type LoadBalancerClient struct { } // GetByID retrieves a Load Balancer by its ID. If the Load Balancer does not exist, nil is returned. -func (c *LoadBalancerClient) GetByID(ctx context.Context, id int) (*LoadBalancer, *Response, error) { +func (c *LoadBalancerClient) GetByID(ctx context.Context, id int64) (*LoadBalancer, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/load_balancers/%d", id), nil) if err != nil { return nil, nil, err @@ -273,8 +273,8 @@ func (c *LoadBalancerClient) GetByName(ctx context.Context, name string) (*LoadB // Get retrieves a Load Balancer by its ID if the input can be parsed as an integer, otherwise it // retrieves a Load Balancer by its name. If the Load Balancer does not exist, nil is returned. func (c *LoadBalancerClient) Get(ctx context.Context, idOrName string) (*LoadBalancer, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -287,7 +287,7 @@ type LoadBalancerListOpts struct { } func (l LoadBalancerListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -322,25 +322,7 @@ func (c *LoadBalancerClient) List(ctx context.Context, opts LoadBalancerListOpts // All returns all Load Balancers. func (c *LoadBalancerClient) All(ctx context.Context) ([]*LoadBalancer, error) { - allLoadBalancer := []*LoadBalancer{} - - opts := LoadBalancerListOpts{} - opts.PerPage = 50 - - err := c.client.all(func(page int) (*Response, error) { - opts.Page = page - LoadBalancer, resp, err := c.List(ctx, opts) - if err != nil { - return resp, err - } - allLoadBalancer = append(allLoadBalancer, LoadBalancer...) - return resp, nil - }) - if err != nil { - return nil, err - } - - return allLoadBalancer, nil + return c.AllWithOpts(ctx, LoadBalancerListOpts{ListOpts: ListOpts{PerPage: 50}}) } // AllWithOpts returns all Load Balancers for the given options. @@ -665,7 +647,7 @@ type LoadBalancerAddServiceOptsHTTP struct { StickySessions *bool } -// LoadBalancerAddServiceOptsHealthCheck holds options for specifying an health check +// LoadBalancerAddServiceOptsHealthCheck holds options for specifying a health check // when adding a service to a Load Balancer. 
type LoadBalancerAddServiceOptsHealthCheck struct { Protocol LoadBalancerServiceProtocol diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/load_balancer_type.go similarity index 86% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/load_balancer_type.go index 5ac748147..29daf9809 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/load_balancer_type.go @@ -6,12 +6,12 @@ import ( "net/url" "strconv" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // LoadBalancerType represents a LoadBalancer type in the Hetzner Cloud. type LoadBalancerType struct { - ID int + ID int64 Name string Description string MaxConnections int @@ -27,7 +27,7 @@ type LoadBalancerTypeClient struct { } // GetByID retrieves a Load Balancer type by its ID. If the Load Balancer type does not exist, nil is returned. -func (c *LoadBalancerTypeClient) GetByID(ctx context.Context, id int) (*LoadBalancerType, *Response, error) { +func (c *LoadBalancerTypeClient) GetByID(ctx context.Context, id int64) (*LoadBalancerType, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/load_balancer_types/%d", id), nil) if err != nil { return nil, nil, err @@ -59,8 +59,8 @@ func (c *LoadBalancerTypeClient) GetByName(ctx context.Context, name string) (*L // Get retrieves a Load Balancer type by its ID if the input can be parsed as an integer, otherwise it // retrieves a Load Balancer type by its name. If the Load Balancer type does not exist, nil is returned. func (c *LoadBalancerTypeClient) Get(ctx context.Context, idOrName string) (*LoadBalancerType, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -73,7 +73,7 @@ type LoadBalancerTypeListOpts struct { } func (l LoadBalancerTypeListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -108,10 +108,12 @@ func (c *LoadBalancerTypeClient) List(ctx context.Context, opts LoadBalancerType // All returns all Load Balancer types. func (c *LoadBalancerTypeClient) All(ctx context.Context) ([]*LoadBalancerType, error) { - allLoadBalancerTypes := []*LoadBalancerType{} + return c.AllWithOpts(ctx, LoadBalancerTypeListOpts{ListOpts: ListOpts{PerPage: 50}}) +} - opts := LoadBalancerTypeListOpts{} - opts.PerPage = 50 +// AllWithOpts returns all Load Balancer types for the given options. 
+func (c *LoadBalancerTypeClient) AllWithOpts(ctx context.Context, opts LoadBalancerTypeListOpts) ([]*LoadBalancerType, error) { + var allLoadBalancerTypes []*LoadBalancerType err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/location.go similarity index 84% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/location.go index c749bdf0b..a0b81d948 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/location.go @@ -6,12 +6,12 @@ import ( "net/url" "strconv" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // Location represents a location in the Hetzner Cloud. type Location struct { - ID int + ID int64 Name string Description string Country string @@ -27,7 +27,7 @@ type LocationClient struct { } // GetByID retrieves a location by its ID. If the location does not exist, nil is returned. -func (c *LocationClient) GetByID(ctx context.Context, id int) (*Location, *Response, error) { +func (c *LocationClient) GetByID(ctx context.Context, id int64) (*Location, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/locations/%d", id), nil) if err != nil { return nil, nil, err @@ -59,8 +59,8 @@ func (c *LocationClient) GetByName(ctx context.Context, name string) (*Location, // Get retrieves a location by its ID if the input can be parsed as an integer, otherwise it // retrieves a location by its name. If the location does not exist, nil is returned. func (c *LocationClient) Get(ctx context.Context, idOrName string) (*Location, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -73,7 +73,7 @@ type LocationListOpts struct { } func (l LocationListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -108,10 +108,12 @@ func (c *LocationClient) List(ctx context.Context, opts LocationListOpts) ([]*Lo // All returns all locations. func (c *LocationClient) All(ctx context.Context) ([]*Location, error) { - allLocations := []*Location{} + return c.AllWithOpts(ctx, LocationListOpts{ListOpts: ListOpts{PerPage: 50}}) +} - opts := LocationListOpts{} - opts.PerPage = 50 +// AllWithOpts returns all locations for the given options. 
+func (c *LocationClient) AllWithOpts(ctx context.Context, opts LocationListOpts) ([]*Location, error) { + var allLocations []*Location err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/network.go similarity index 92% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/network.go index 5a196816d..57b16e5b5 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/network.go @@ -11,7 +11,7 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // NetworkZone specifies a network zone. @@ -36,7 +36,7 @@ const ( // Network represents a network in the Hetzner Cloud. type Network struct { - ID int + ID int64 Name string Created time.Time IPRange *net.IPNet @@ -45,6 +45,9 @@ type Network struct { Servers []*Server Protection NetworkProtection Labels map[string]string + + // ExposeRoutesToVSwitch indicates if the routes from this network should be exposed to the vSwitch connection. + ExposeRoutesToVSwitch bool } // NetworkSubnet represents a subnet of a network in the Hetzner Cloud. @@ -53,7 +56,7 @@ type NetworkSubnet struct { IPRange *net.IPNet NetworkZone NetworkZone Gateway net.IP - VSwitchID int + VSwitchID int64 } // NetworkRoute represents a route of a network. @@ -73,7 +76,7 @@ type NetworkClient struct { } // GetByID retrieves a network by its ID. If the network does not exist, nil is returned. -func (c *NetworkClient) GetByID(ctx context.Context, id int) (*Network, *Response, error) { +func (c *NetworkClient) GetByID(ctx context.Context, id int64) (*Network, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/networks/%d", id), nil) if err != nil { return nil, nil, err @@ -105,8 +108,8 @@ func (c *NetworkClient) GetByName(ctx context.Context, name string) (*Network, * // Get retrieves a network by its ID if the input can be parsed as an integer, otherwise it // retrieves a network by its name. If the network does not exist, nil is returned. func (c *NetworkClient) Get(ctx context.Context, idOrName string) (*Network, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -119,7 +122,7 @@ type NetworkListOpts struct { } func (l NetworkListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -190,6 +193,9 @@ func (c *NetworkClient) Delete(ctx context.Context, network *Network) (*Response type NetworkUpdateOpts struct { Name string Labels map[string]string + // ExposeRoutesToVSwitch indicates if the routes from this network should be exposed to the vSwitch connection. + // The exposing only takes effect if a vSwitch connection is active. + ExposeRoutesToVSwitch *bool } // Update updates a network. 
@@ -200,6 +206,10 @@ func (c *NetworkClient) Update(ctx context.Context, network *Network, opts Netwo if opts.Labels != nil { reqBody.Labels = &opts.Labels } + if opts.ExposeRoutesToVSwitch != nil { + reqBody.ExposeRoutesToVSwitch = opts.ExposeRoutesToVSwitch + } + reqBodyData, err := json.Marshal(reqBody) if err != nil { return nil, nil, err @@ -226,6 +236,9 @@ type NetworkCreateOpts struct { Subnets []NetworkSubnet Routes []NetworkRoute Labels map[string]string + // ExposeRoutesToVSwitch indicates if the routes from this network should be exposed to the vSwitch connection. + // The exposing only takes effect if a vSwitch connection is active. + ExposeRoutesToVSwitch bool } // Validate checks if options are valid. @@ -245,8 +258,9 @@ func (c *NetworkClient) Create(ctx context.Context, opts NetworkCreateOpts) (*Ne return nil, nil, err } reqBody := schema.NetworkCreateRequest{ - Name: opts.Name, - IPRange: opts.IPRange.String(), + Name: opts.Name, + IPRange: opts.IPRange.String(), + ExposeRoutesToVSwitch: opts.ExposeRoutesToVSwitch, } for _, subnet := range opts.Subnets { s := schema.NetworkSubnet{ diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/placement_group.go similarity index 96% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/placement_group.go index 07fedad5e..acf95b6dd 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/placement_group.go @@ -10,16 +10,16 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // PlacementGroup represents a Placement Group in the Hetzner Cloud. type PlacementGroup struct { - ID int + ID int64 Name string Labels map[string]string Created time.Time - Servers []int + Servers []int64 Type PlacementGroupType } @@ -37,7 +37,7 @@ type PlacementGroupClient struct { } // GetByID retrieves a PlacementGroup by its ID. If the PlacementGroup does not exist, nil is returned. -func (c *PlacementGroupClient) GetByID(ctx context.Context, id int) (*PlacementGroup, *Response, error) { +func (c *PlacementGroupClient) GetByID(ctx context.Context, id int64) (*PlacementGroup, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/placement_groups/%d", id), nil) if err != nil { return nil, nil, err @@ -69,8 +69,8 @@ func (c *PlacementGroupClient) GetByName(ctx context.Context, name string) (*Pla // Get retrieves a PlacementGroup by its ID if the input can be parsed as an integer, otherwise it // retrieves a PlacementGroup by its name. If the PlacementGroup does not exist, nil is returned. 
func (c *PlacementGroupClient) Get(ctx context.Context, idOrName string) (*PlacementGroup, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -84,7 +84,7 @@ type PlacementGroupListOpts struct { } func (l PlacementGroupListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/pricing.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/pricing.go similarity index 98% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/pricing.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/pricing.go index 836f391ff..2a1b96b76 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/pricing.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/pricing.go @@ -3,7 +3,7 @@ package hcloud import ( "context" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // Pricing specifies pricing information for various resources. diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/primary_ip.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/primary_ip.go similarity index 88% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/primary_ip.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/primary_ip.go index e328b9b36..a1a82f11a 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/primary_ip.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/primary_ip.go @@ -10,12 +10,12 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // PrimaryIP defines a Primary IP. type PrimaryIP struct { - ID int + ID int64 IP net.IP Network *net.IPNet Labels map[string]string @@ -23,7 +23,7 @@ type PrimaryIP struct { Type PrimaryIPType Protection PrimaryIPProtection DNSPtr map[string]string - AssigneeID int + AssigneeID int64 AssigneeType string AutoDelete bool Blocked bool @@ -43,6 +43,32 @@ type PrimaryIPDNSPTR struct { IP string } +// changeDNSPtr changes or resets the reverse DNS pointer for a IP address. +// Pass a nil ptr to reset the reverse DNS pointer to its default value. +func (p *PrimaryIP) changeDNSPtr(ctx context.Context, client *Client, ip net.IP, ptr *string) (*Action, *Response, error) { + reqBody := schema.PrimaryIPActionChangeDNSPtrRequest{ + IP: ip.String(), + DNSPtr: ptr, + } + reqBodyData, err := json.Marshal(reqBody) + if err != nil { + return nil, nil, err + } + + path := fmt.Sprintf("/primary_ips/%d/actions/change_dns_ptr", p.ID) + req, err := client.NewRequest(ctx, "POST", path, bytes.NewReader(reqBodyData)) + if err != nil { + return nil, nil, err + } + + var respBody PrimaryIPChangeDNSPtrResult + resp, err := client.Do(req, &respBody) + if err != nil { + return nil, resp, err + } + return ActionFromSchema(respBody.Action), resp, nil +} + // GetDNSPtrForIP searches for the dns assigned to the given IP address. // It returns an error if there is no dns set for the given IP address. func (p *PrimaryIP) GetDNSPtrForIP(ip net.IP) (string, error) { @@ -66,7 +92,7 @@ const ( // PrimaryIPCreateOpts defines the request to // create a Primary IP. 
type PrimaryIPCreateOpts struct { - AssigneeID *int `json:"assignee_id,omitempty"` + AssigneeID *int64 `json:"assignee_id,omitempty"` AssigneeType string `json:"assignee_type"` AutoDelete *bool `json:"auto_delete,omitempty"` Datacenter string `json:"datacenter,omitempty"` @@ -93,8 +119,8 @@ type PrimaryIPUpdateOpts struct { // PrimaryIPAssignOpts defines the request to // assign a Primary IP to an assignee (usually a server). type PrimaryIPAssignOpts struct { - ID int - AssigneeID int `json:"assignee_id"` + ID int64 + AssigneeID int64 `json:"assignee_id"` AssigneeType string `json:"assignee_type"` } @@ -107,7 +133,7 @@ type PrimaryIPAssignResult struct { // PrimaryIPChangeDNSPtrOpts defines the request to // change a DNS PTR entry from a Primary IP. type PrimaryIPChangeDNSPtrOpts struct { - ID int + ID int64 DNSPtr string `json:"dns_ptr"` IP string `json:"ip"` } @@ -121,7 +147,7 @@ type PrimaryIPChangeDNSPtrResult struct { // PrimaryIPChangeProtectionOpts defines the request to // change protection configuration of a Primary IP. type PrimaryIPChangeProtectionOpts struct { - ID int + ID int64 Delete bool `json:"delete"` } @@ -137,7 +163,7 @@ type PrimaryIPClient struct { } // GetByID retrieves a Primary IP by its ID. If the Primary IP does not exist, nil is returned. -func (c *PrimaryIPClient) GetByID(ctx context.Context, id int) (*PrimaryIP, *Response, error) { +func (c *PrimaryIPClient) GetByID(ctx context.Context, id int64) (*PrimaryIP, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/primary_ips/%d", id), nil) if err != nil { return nil, nil, err @@ -181,8 +207,8 @@ func (c *PrimaryIPClient) GetByName(ctx context.Context, name string) (*PrimaryI // Get retrieves a Primary IP by its ID if the input can be parsed as an integer, otherwise it // retrieves a Primary IP by its name. If the Primary IP does not exist, nil is returned. func (c *PrimaryIPClient) Get(ctx context.Context, idOrName string) (*PrimaryIP, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -196,7 +222,7 @@ type PrimaryIPListOpts struct { } func (l PrimaryIPListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -337,7 +363,7 @@ func (c *PrimaryIPClient) Assign(ctx context.Context, opts PrimaryIPAssignOpts) } // Unassign a Primary IP from a resource. -func (c *PrimaryIPClient) Unassign(ctx context.Context, id int) (*Action, *Response, error) { +func (c *PrimaryIPClient) Unassign(ctx context.Context, id int64) (*Action, *Response, error) { path := fmt.Sprintf("/primary_ips/%d/actions/unassign", id) req, err := c.client.NewRequest(ctx, "POST", path, bytes.NewReader([]byte{})) if err != nil { diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/rdns.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/rdns.go similarity index 80% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/rdns.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/rdns.go index 891ea2bf8..f53c030da 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/rdns.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/rdns.go @@ -7,7 +7,7 @@ import ( ) // RDNSSupporter defines functions to change and lookup reverse dns entries. -// currently implemented by Server, FloatingIP and LoadBalancer. 
+// currently implemented by Server, FloatingIP, PrimaryIP and LoadBalancer. type RDNSSupporter interface { // changeDNSPtr changes or resets the reverse DNS pointer for a IP address. // Pass a nil ptr to reset the reverse DNS pointer to its default value. @@ -17,7 +17,7 @@ type RDNSSupporter interface { GetDNSPtrForIP(ip net.IP) (string, error) } -// RDNSClient simplifys the handling objects which support reverse dns entries. +// RDNSClient simplifies the handling objects which support reverse dns entries. type RDNSClient struct { client *Client } @@ -44,3 +44,9 @@ func RDNSLookup(i interface{}, ip net.IP) (string, error) { return rdns.GetDNSPtrForIP(ip) } + +// Make sure that all expected Resources actually implement the interface. +var _ RDNSSupporter = &FloatingIP{} +var _ RDNSSupporter = &PrimaryIP{} +var _ RDNSSupporter = &Server{} +var _ RDNSSupporter = &LoadBalancer{} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/resource.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/resource.go similarity index 89% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/resource.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/resource.go index 8a734dfd6..a74b2cf7a 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/resource.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/resource.go @@ -2,6 +2,6 @@ package hcloud // Resource defines the schema of a resource. type Resource struct { - ID int + ID int64 Type string } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema.go similarity index 97% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema.go index b72db88d2..bdd5a54e7 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema.go @@ -6,7 +6,7 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // This file provides converter functions to convert models in the @@ -278,15 +278,19 @@ func ServerPrivateNetFromSchema(s schema.ServerPrivateNet) ServerPrivateNet { // ServerTypeFromSchema converts a schema.ServerType to a ServerType. 
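Editor's note: the `var _ RDNSSupporter = &FloatingIP{}` lines added above are compile-time assertions. The stand-alone sketch below illustrates the idiom with invented names (`rdnsSupporter`, `floatingIP`); it is not hcloud-go code.

```go
package main

import "fmt"

// rdnsSupporter is a toy interface used only for this illustration.
type rdnsSupporter interface {
	dnsPtr() string
}

type floatingIP struct{ ptr string }

func (f *floatingIP) dnsPtr() string { return f.ptr }

// Compile-time assertion: if *floatingIP ever stops satisfying
// rdnsSupporter, the build fails right here instead of at a distant
// call site.
var _ rdnsSupporter = (*floatingIP)(nil)

func main() {
	fmt.Println((&floatingIP{ptr: "example.com"}).dnsPtr())
}
```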
func ServerTypeFromSchema(s schema.ServerType) *ServerType { st := &ServerType{ - ID: s.ID, - Name: s.Name, - Description: s.Description, - Cores: s.Cores, - Memory: s.Memory, - Disk: s.Disk, - StorageType: StorageType(s.StorageType), - CPUType: CPUType(s.CPUType), - Architecture: Architecture(s.Architecture), + ID: s.ID, + Name: s.Name, + Description: s.Description, + Cores: s.Cores, + Memory: s.Memory, + Disk: s.Disk, + StorageType: StorageType(s.StorageType), + CPUType: CPUType(s.CPUType), + Architecture: Architecture(s.Architecture), + IncludedTraffic: s.IncludedTraffic, + DeprecatableResource: DeprecatableResource{ + DeprecationFromSchema(s.Deprecation), + }, } for _, price := range s.Prices { st.Pricings = append(st.Pricings, ServerTypeLocationPricing{ @@ -301,6 +305,7 @@ func ServerTypeFromSchema(s schema.ServerType) *ServerType { }, }) } + return st } @@ -398,7 +403,8 @@ func NetworkFromSchema(s schema.Network) *Network { Protection: NetworkProtection{ Delete: s.Protection.Delete, }, - Labels: map[string]string{}, + Labels: map[string]string{}, + ExposeRoutesToVSwitch: s.ExposeRoutesToVSwitch, } _, n.IPRange, _ = net.ParseCIDR(s.IPRange) @@ -887,7 +893,7 @@ func loadBalancerCreateOptsToSchema(opts LoadBalancerCreateOpts) schema.LoadBala } if opts.Location != nil { if opts.Location.ID != 0 { - req.Location = Ptr(strconv.Itoa(opts.Location.ID)) + req.Location = Ptr(strconv.FormatInt(opts.Location.ID, 10)) } else { req.Location = Ptr(opts.Location.Name) } @@ -937,7 +943,7 @@ func loadBalancerCreateOptsToSchema(opts LoadBalancerCreateOpts) schema.LoadBala } } if service.HTTP.Certificates != nil { - certificates := []int{} + certificates := []int64{} for _, certificate := range service.HTTP.Certificates { certificates = append(certificates, certificate.ID) } @@ -992,7 +998,7 @@ func loadBalancerAddServiceOptsToSchema(opts LoadBalancerAddServiceOpts) schema. req.HTTP.CookieLifetime = Ptr(int(opts.HTTP.CookieLifetime.Seconds())) } if opts.HTTP.Certificates != nil { - certificates := []int{} + certificates := []int64{} for _, certificate := range opts.HTTP.Certificates { certificates = append(certificates, certificate.ID) } @@ -1044,7 +1050,7 @@ func loadBalancerUpdateServiceOptsToSchema(opts LoadBalancerUpdateServiceOpts) s req.HTTP.CookieLifetime = Ptr(int(opts.HTTP.CookieLifetime.Seconds())) } if opts.HTTP.Certificates != nil { - certificates := []int{} + certificates := []int64{} for _, certificate := range opts.HTTP.Certificates { certificates = append(certificates, certificate.ID) } @@ -1248,3 +1254,15 @@ func loadBalancerMetricsFromSchema(s *schema.LoadBalancerGetMetricsResponse) (*L return &ms, nil } + +// DeprecationFromSchema converts a [schema.DeprecationInfo] to a [DeprecationInfo]. +func DeprecationFromSchema(s *schema.DeprecationInfo) *DeprecationInfo { + if s == nil { + return nil + } + + return &DeprecationInfo{ + Announced: s.Announced, + UnavailableAfter: s.UnavailableAfter, + } +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/action.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/action.go similarity index 93% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/action.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/action.go index df4d7cf71..49ac96a22 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/action.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/action.go @@ -4,7 +4,7 @@ import "time" // Action defines the schema of an action. 
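Editor's note: the new `DeprecationFromSchema` converter above maps a nil schema pointer to a nil domain pointer. A hedged sketch of that converter shape, using invented local types rather than the hcloud-go ones:

```go
package main

import (
	"fmt"
	"time"
)

type schemaDeprecation struct {
	Announced        time.Time
	UnavailableAfter time.Time
}

type deprecationInfo struct {
	Announced        time.Time
	UnavailableAfter time.Time
}

// fromSchema follows the converter pattern added in the diff: a nil schema
// value maps to a nil domain value, so callers can answer "is this
// deprecated at all?" with a single nil check.
func fromSchema(s *schemaDeprecation) *deprecationInfo {
	if s == nil {
		return nil
	}
	return &deprecationInfo{Announced: s.Announced, UnavailableAfter: s.UnavailableAfter}
}

func main() {
	fmt.Println(fromSchema(nil) == nil) // true: not deprecated
	d := fromSchema(&schemaDeprecation{Announced: time.Now()})
	fmt.Println(d != nil) // true: deprecation info present
}
```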
type Action struct { - ID int `json:"id"` + ID int64 `json:"id"` Status string `json:"status"` Command string `json:"command"` Progress int `json:"progress"` @@ -16,7 +16,7 @@ type Action struct { // ActionResourceReference defines the schema of an action resource reference. type ActionResourceReference struct { - ID int `json:"id"` + ID int64 `json:"id"` Type string `json:"type"` } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/certificate.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/certificate.go similarity index 97% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/certificate.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/certificate.go index a81b807a2..eb7b03ce2 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/certificate.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/certificate.go @@ -4,7 +4,7 @@ import "time" // CertificateUsedByRef defines the schema of a resource using a certificate. type CertificateUsedByRef struct { - ID int `json:"id"` + ID int64 `json:"id"` Type string `json:"type"` } @@ -16,7 +16,7 @@ type CertificateStatusRef struct { // Certificate defines the schema of an certificate. type Certificate struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Labels map[string]string `json:"labels"` Type string `json:"type"` diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/datacenter.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/datacenter.go similarity index 84% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/datacenter.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/datacenter.go index 3e8178e89..eaa12429f 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/datacenter.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/datacenter.go @@ -2,13 +2,13 @@ package schema // Datacenter defines the schema of a datacenter. 
type Datacenter struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Description string `json:"description"` Location Location `json:"location"` ServerTypes struct { - Supported []int `json:"supported"` - Available []int `json:"available"` + Supported []int64 `json:"supported"` + Available []int64 `json:"available"` } `json:"server_types"` } diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/deprecation.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/deprecation.go new file mode 100644 index 000000000..87292f78b --- /dev/null +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/deprecation.go @@ -0,0 +1,12 @@ +package schema + +import "time" + +type DeprecationInfo struct { + Announced time.Time `json:"announced"` + UnavailableAfter time.Time `json:"unavailable_after"` +} + +type DeprecatableResource struct { + Deprecation *DeprecationInfo `json:"deprecation"` +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/error.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/error.go similarity index 100% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/error.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/error.go diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/firewall.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/firewall.go similarity index 98% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/firewall.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/firewall.go index b085bbb13..371e648f1 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/firewall.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/firewall.go @@ -4,7 +4,7 @@ import "time" // Firewall defines the schema of a Firewall. type Firewall struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Labels map[string]string `json:"labels"` Created time.Time `json:"created"` @@ -54,7 +54,7 @@ type FirewallResourceLabelSelector struct { // FirewallResourceServer defines the schema of a Server to apply a Firewall on. type FirewallResourceServer struct { - ID int `json:"id"` + ID int64 `json:"id"` } // FirewallCreateResponse defines the schema of the response when creating a Firewall. diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/floating_ip.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/floating_ip.go similarity index 95% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/floating_ip.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/floating_ip.go index 37295dad9..6256b0d96 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/floating_ip.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/floating_ip.go @@ -4,12 +4,12 @@ import "time" // FloatingIP defines the schema of a Floating IP. 
type FloatingIP struct { - ID int `json:"id"` + ID int64 `json:"id"` Description *string `json:"description"` Created time.Time `json:"created"` IP string `json:"ip"` Type string `json:"type"` - Server *int `json:"server"` + Server *int64 `json:"server"` DNSPtr []FloatingIPDNSPtr `json:"dns_ptr"` HomeLocation Location `json:"home_location"` Blocked bool `json:"blocked"` @@ -59,7 +59,7 @@ type FloatingIPListResponse struct { type FloatingIPCreateRequest struct { Type string `json:"type"` HomeLocation *string `json:"home_location,omitempty"` - Server *int `json:"server,omitempty"` + Server *int64 `json:"server,omitempty"` Description *string `json:"description,omitempty"` Labels *map[string]string `json:"labels,omitempty"` Name *string `json:"name,omitempty"` @@ -75,7 +75,7 @@ type FloatingIPCreateResponse struct { // FloatingIPActionAssignRequest defines the schema of the request to // create an assign Floating IP action. type FloatingIPActionAssignRequest struct { - Server int `json:"server"` + Server int64 `json:"server"` } // FloatingIPActionAssignResponse defines the schema of the response when diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/image.go similarity index 95% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/image.go index 76775b131..1520935ec 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/image.go @@ -4,7 +4,7 @@ import "time" // Image defines the schema of an image. type Image struct { - ID int `json:"id"` + ID int64 `json:"id"` Status string `json:"status"` Type string `json:"type"` Name *string `json:"name"` @@ -13,7 +13,7 @@ type Image struct { DiskSize float32 `json:"disk_size"` Created time.Time `json:"created"` CreatedFrom *ImageCreatedFrom `json:"created_from"` - BoundTo *int `json:"bound_to"` + BoundTo *int64 `json:"bound_to"` OSFlavor string `json:"os_flavor"` OSVersion *string `json:"os_version"` Architecture string `json:"architecture"` @@ -31,7 +31,7 @@ type ImageProtection struct { // ImageCreatedFrom defines the schema of the images created from reference. type ImageCreatedFrom struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/iso.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/iso.go similarity index 94% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/iso.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/iso.go index dfcc4e347..4f89dd046 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/iso.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/iso.go @@ -4,7 +4,7 @@ import "time" // ISO defines the schema of an ISO image. 
type ISO struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Description string `json:"description"` Type string `json:"type"` diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/load_balancer.go similarity index 91% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/load_balancer.go index 68adf5eb6..7e1c4f5da 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/load_balancer.go @@ -3,7 +3,7 @@ package schema import "time" type LoadBalancer struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` PublicNet LoadBalancerPublicNet `json:"public_net"` PrivateNet []LoadBalancerPrivateNet `json:"private_net"` @@ -37,7 +37,7 @@ type LoadBalancerPublicNetIPv6 struct { } type LoadBalancerPrivateNet struct { - Network int `json:"network"` + Network int64 `json:"network"` IP string `json:"ip"` } @@ -59,11 +59,11 @@ type LoadBalancerService struct { } type LoadBalancerServiceHTTP struct { - CookieName string `json:"cookie_name"` - CookieLifetime int `json:"cookie_lifetime"` - Certificates []int `json:"certificates"` - RedirectHTTP bool `json:"redirect_http"` - StickySessions bool `json:"sticky_sessions"` + CookieName string `json:"cookie_name"` + CookieLifetime int `json:"cookie_lifetime"` + Certificates []int64 `json:"certificates"` + RedirectHTTP bool `json:"redirect_http"` + StickySessions bool `json:"sticky_sessions"` } type LoadBalancerServiceHealthCheck struct { @@ -99,7 +99,7 @@ type LoadBalancerTargetHealthStatus struct { } type LoadBalancerTargetServer struct { - ID int `json:"id"` + ID int64 `json:"id"` } type LoadBalancerTargetLabelSelector struct { @@ -127,7 +127,7 @@ type LoadBalancerActionAddTargetRequest struct { } type LoadBalancerActionAddTargetRequestServer struct { - ID int `json:"id"` + ID int64 `json:"id"` } type LoadBalancerActionAddTargetRequestLabelSelector struct { @@ -150,7 +150,7 @@ type LoadBalancerActionRemoveTargetRequest struct { } type LoadBalancerActionRemoveTargetRequestServer struct { - ID int `json:"id"` + ID int64 `json:"id"` } type LoadBalancerActionRemoveTargetRequestLabelSelector struct { @@ -175,11 +175,11 @@ type LoadBalancerActionAddServiceRequest struct { } type LoadBalancerActionAddServiceRequestHTTP struct { - CookieName *string `json:"cookie_name,omitempty"` - CookieLifetime *int `json:"cookie_lifetime,omitempty"` - Certificates *[]int `json:"certificates,omitempty"` - RedirectHTTP *bool `json:"redirect_http,omitempty"` - StickySessions *bool `json:"sticky_sessions,omitempty"` + CookieName *string `json:"cookie_name,omitempty"` + CookieLifetime *int `json:"cookie_lifetime,omitempty"` + Certificates *[]int64 `json:"certificates,omitempty"` + RedirectHTTP *bool `json:"redirect_http,omitempty"` + StickySessions *bool `json:"sticky_sessions,omitempty"` } type LoadBalancerActionAddServiceRequestHealthCheck struct { @@ -213,11 +213,11 @@ type LoadBalancerActionUpdateServiceRequest struct { } type LoadBalancerActionUpdateServiceRequestHTTP struct { - CookieName *string `json:"cookie_name,omitempty"` - CookieLifetime *int `json:"cookie_lifetime,omitempty"` - Certificates *[]int `json:"certificates,omitempty"` - RedirectHTTP *bool `json:"redirect_http,omitempty"` - StickySessions *bool `json:"sticky_sessions,omitempty"` + 
CookieName *string `json:"cookie_name,omitempty"` + CookieLifetime *int `json:"cookie_lifetime,omitempty"` + Certificates *[]int64 `json:"certificates,omitempty"` + RedirectHTTP *bool `json:"redirect_http,omitempty"` + StickySessions *bool `json:"sticky_sessions,omitempty"` } type LoadBalancerActionUpdateServiceRequestHealthCheck struct { @@ -259,7 +259,7 @@ type LoadBalancerCreateRequest struct { Targets []LoadBalancerCreateRequestTarget `json:"targets,omitempty"` Services []LoadBalancerCreateRequestService `json:"services,omitempty"` PublicInterface *bool `json:"public_interface,omitempty"` - Network *int `json:"network,omitempty"` + Network *int64 `json:"network,omitempty"` } type LoadBalancerCreateRequestAlgorithm struct { @@ -275,7 +275,7 @@ type LoadBalancerCreateRequestTarget struct { } type LoadBalancerCreateRequestTargetServer struct { - ID int `json:"id"` + ID int64 `json:"id"` } type LoadBalancerCreateRequestTargetLabelSelector struct { @@ -296,11 +296,11 @@ type LoadBalancerCreateRequestService struct { } type LoadBalancerCreateRequestServiceHTTP struct { - CookieName *string `json:"cookie_name,omitempty"` - CookieLifetime *int `json:"cookie_lifetime,omitempty"` - Certificates *[]int `json:"certificates,omitempty"` - RedirectHTTP *bool `json:"redirect_http,omitempty"` - StickySessions *bool `json:"sticky_sessions,omitempty"` + CookieName *string `json:"cookie_name,omitempty"` + CookieLifetime *int `json:"cookie_lifetime,omitempty"` + Certificates *[]int64 `json:"certificates,omitempty"` + RedirectHTTP *bool `json:"redirect_http,omitempty"` + StickySessions *bool `json:"sticky_sessions,omitempty"` } type LoadBalancerCreateRequestServiceHealthCheck struct { @@ -351,7 +351,7 @@ type LoadBalancerActionChangeAlgorithmResponse struct { } type LoadBalancerActionAttachToNetworkRequest struct { - Network int `json:"network"` + Network int64 `json:"network"` IP *string `json:"ip,omitempty"` } @@ -360,7 +360,7 @@ type LoadBalancerActionAttachToNetworkResponse struct { } type LoadBalancerActionDetachFromNetworkRequest struct { - Network int `json:"network"` + Network int64 `json:"network"` } type LoadBalancerActionDetachFromNetworkResponse struct { diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer_type.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/load_balancer_type.go similarity index 94% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer_type.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/load_balancer_type.go index b0baf0489..09ac43d7a 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/load_balancer_type.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/load_balancer_type.go @@ -2,7 +2,7 @@ package schema // LoadBalancerType defines the schema of a LoadBalancer type. 
type LoadBalancerType struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Description string `json:"description"` MaxConnections int `json:"max_connections"` diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/location.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/location.go similarity index 95% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/location.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/location.go index 3dd58ad5e..e07306071 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/location.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/location.go @@ -2,7 +2,7 @@ package schema // Location defines the schema of a location. type Location struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Description string `json:"description"` Country string `json:"country"` diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/meta.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/meta.go similarity index 100% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/meta.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/meta.go diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/network.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/network.go similarity index 75% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/network.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/network.go index 87e184d95..2344aea45 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/network.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/network.go @@ -4,15 +4,16 @@ import "time" // Network defines the schema of a network. type Network struct { - ID int `json:"id"` - Name string `json:"name"` - Created time.Time `json:"created"` - IPRange string `json:"ip_range"` - Subnets []NetworkSubnet `json:"subnets"` - Routes []NetworkRoute `json:"routes"` - Servers []int `json:"servers"` - Protection NetworkProtection `json:"protection"` - Labels map[string]string `json:"labels"` + ID int64 `json:"id"` + Name string `json:"name"` + Created time.Time `json:"created"` + IPRange string `json:"ip_range"` + Subnets []NetworkSubnet `json:"subnets"` + Routes []NetworkRoute `json:"routes"` + Servers []int64 `json:"servers"` + Protection NetworkProtection `json:"protection"` + Labels map[string]string `json:"labels"` + ExposeRoutesToVSwitch bool `json:"expose_routes_to_vswitch"` } // NetworkSubnet represents a subnet of a network. @@ -21,7 +22,7 @@ type NetworkSubnet struct { IPRange string `json:"ip_range"` NetworkZone string `json:"network_zone"` Gateway string `json:"gateway,omitempty"` - VSwitchID int `json:"vswitch_id,omitempty"` + VSwitchID int64 `json:"vswitch_id,omitempty"` } // NetworkRoute represents a route of a network. @@ -37,8 +38,9 @@ type NetworkProtection struct { // NetworkUpdateRequest defines the schema of the request to update a network. type NetworkUpdateRequest struct { - Name string `json:"name,omitempty"` - Labels *map[string]string `json:"labels,omitempty"` + Name string `json:"name,omitempty"` + Labels *map[string]string `json:"labels,omitempty"` + ExposeRoutesToVSwitch *bool `json:"expose_routes_to_vswitch,omitempty"` } // NetworkUpdateResponse defines the schema of the response when updating a network. 
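Editor's note: `NetworkUpdateRequest` above models the new `expose_routes_to_vswitch` flag as a `*bool` with `omitempty`, while the create request uses a plain `bool`. The sketch below (with an invented `updateRequest` type) shows why the pointer matters for update payloads: "leave unchanged" stays distinguishable from an explicit `false`.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// updateRequest mimics the shape of an update request whose optional
// fields are pointers with omitempty.
type updateRequest struct {
	Name                  string `json:"name,omitempty"`
	ExposeRoutesToVSwitch *bool  `json:"expose_routes_to_vswitch,omitempty"`
}

func main() {
	unset, _ := json.Marshal(updateRequest{Name: "net-1"})
	off := false
	explicit, _ := json.Marshal(updateRequest{Name: "net-1", ExposeRoutesToVSwitch: &off})
	fmt.Println(string(unset))    // {"name":"net-1"}
	fmt.Println(string(explicit)) // {"name":"net-1","expose_routes_to_vswitch":false}
}
```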
@@ -60,11 +62,12 @@ type NetworkGetResponse struct { // NetworkCreateRequest defines the schema of the request to create a network. type NetworkCreateRequest struct { - Name string `json:"name"` - IPRange string `json:"ip_range"` - Subnets []NetworkSubnet `json:"subnets,omitempty"` - Routes []NetworkRoute `json:"routes,omitempty"` - Labels *map[string]string `json:"labels,omitempty"` + Name string `json:"name"` + IPRange string `json:"ip_range"` + Subnets []NetworkSubnet `json:"subnets,omitempty"` + Routes []NetworkRoute `json:"routes,omitempty"` + Labels *map[string]string `json:"labels,omitempty"` + ExposeRoutesToVSwitch bool `json:"expose_routes_to_vswitch"` } // NetworkCreateResponse defines the schema of the response when @@ -92,7 +95,7 @@ type NetworkActionAddSubnetRequest struct { IPRange string `json:"ip_range,omitempty"` NetworkZone string `json:"network_zone"` Gateway string `json:"gateway"` - VSwitchID int `json:"vswitch_id,omitempty"` + VSwitchID int64 `json:"vswitch_id,omitempty"` } // NetworkActionAddSubnetResponse defines the schema of the response when diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/placement_group.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/placement_group.go similarity index 92% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/placement_group.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/placement_group.go index 6bee4390c..671bd6bed 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/placement_group.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/placement_group.go @@ -3,11 +3,11 @@ package schema import "time" type PlacementGroup struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Labels map[string]string `json:"labels"` Created time.Time `json:"created"` - Servers []int `json:"servers"` + Servers []int64 `json:"servers"` Type string `json:"type"` } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/pricing.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/pricing.go similarity index 97% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/pricing.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/pricing.go index 0c06c73d2..192352f5d 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/pricing.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/pricing.go @@ -61,7 +61,7 @@ type PricingServerBackup struct { // PricingServerType defines the schema of pricing information for a server type. type PricingServerType struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Prices []PricingServerTypePrice `json:"prices"` } @@ -76,7 +76,7 @@ type PricingServerTypePrice struct { // PricingLoadBalancerType defines the schema of pricing information for a Load Balancer type. 
type PricingLoadBalancerType struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Prices []PricingLoadBalancerTypePrice `json:"prices"` } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/primary_ip.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/primary_ip.go similarity index 83% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/primary_ip.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/primary_ip.go index d232a732d..b685c386f 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/primary_ip.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/primary_ip.go @@ -4,14 +4,14 @@ import "time" // PrimaryIP defines a Primary IP. type PrimaryIP struct { - ID int `json:"id"` + ID int64 `json:"id"` IP string `json:"ip"` Labels map[string]string `json:"labels"` Name string `json:"name"` Type string `json:"type"` Protection PrimaryIPProtection `json:"protection"` DNSPtr []PrimaryIPDNSPTR `json:"dns_ptr"` - AssigneeID int `json:"assignee_id"` + AssigneeID int64 `json:"assignee_id"` AssigneeType string `json:"assignee_type"` AutoDelete bool `json:"auto_delete"` Blocked bool `json:"blocked"` @@ -53,3 +53,10 @@ type PrimaryIPListResult struct { type PrimaryIPUpdateResult struct { PrimaryIP PrimaryIP `json:"primary_ip"` } + +// PrimaryIPActionChangeDNSPtrRequest defines the schema for the request to +// change a Primary IP's reverse DNS pointer. +type PrimaryIPActionChangeDNSPtrRequest struct { + IP string `json:"ip"` + DNSPtr *string `json:"dns_ptr"` +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/server.go similarity index 93% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/server.go index 4786b1f9a..39a10b064 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/server.go @@ -4,7 +4,7 @@ import "time" // Server defines the schema of a server. type Server struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Status string `json:"status"` Created time.Time `json:"created"` @@ -22,7 +22,7 @@ type Server struct { Image *Image `json:"image"` Protection ServerProtection `json:"protection"` Labels map[string]string `json:"labels"` - Volumes []int `json:"volumes"` + Volumes []int64 `json:"volumes"` PrimaryDiskSize int `json:"primary_disk_size"` PlacementGroup *PlacementGroup `json:"placement_group"` } @@ -38,14 +38,14 @@ type ServerProtection struct { type ServerPublicNet struct { IPv4 ServerPublicNetIPv4 `json:"ipv4"` IPv6 ServerPublicNetIPv6 `json:"ipv6"` - FloatingIPs []int `json:"floating_ips"` + FloatingIPs []int64 `json:"floating_ips"` Firewalls []ServerFirewall `json:"firewalls"` } // ServerPublicNetIPv4 defines the schema of a server's public // network information for an IPv4. type ServerPublicNetIPv4 struct { - ID int `json:"id"` + ID int64 `json:"id"` IP string `json:"ip"` Blocked bool `json:"blocked"` DNSPtr string `json:"dns_ptr"` @@ -54,7 +54,7 @@ type ServerPublicNetIPv4 struct { // ServerPublicNetIPv6 defines the schema of a server's public // network information for an IPv6. 
type ServerPublicNetIPv6 struct { - ID int `json:"id"` + ID int64 `json:"id"` IP string `json:"ip"` Blocked bool `json:"blocked"` DNSPtr []ServerPublicNetIPv6DNSPtr `json:"dns_ptr"` @@ -70,13 +70,13 @@ type ServerPublicNetIPv6DNSPtr struct { // ServerFirewall defines the schema of a Server's Firewalls on // a certain network interface. type ServerFirewall struct { - ID int `json:"id"` + ID int64 `json:"id"` Status string `json:"status"` } // ServerPrivateNet defines the schema of a server's private network information. type ServerPrivateNet struct { - Network int `json:"network"` + Network int64 `json:"network"` IP string `json:"ip"` AliasIPs []string `json:"alias_ips"` MACAddress string `json:"mac_address"` @@ -100,31 +100,31 @@ type ServerCreateRequest struct { Name string `json:"name"` ServerType interface{} `json:"server_type"` // int or string Image interface{} `json:"image"` // int or string - SSHKeys []int `json:"ssh_keys,omitempty"` + SSHKeys []int64 `json:"ssh_keys,omitempty"` Location string `json:"location,omitempty"` Datacenter string `json:"datacenter,omitempty"` UserData string `json:"user_data,omitempty"` StartAfterCreate *bool `json:"start_after_create,omitempty"` Labels *map[string]string `json:"labels,omitempty"` Automount *bool `json:"automount,omitempty"` - Volumes []int `json:"volumes,omitempty"` - Networks []int `json:"networks,omitempty"` + Volumes []int64 `json:"volumes,omitempty"` + Networks []int64 `json:"networks,omitempty"` Firewalls []ServerCreateFirewalls `json:"firewalls,omitempty"` - PlacementGroup int `json:"placement_group,omitempty"` + PlacementGroup int64 `json:"placement_group,omitempty"` PublicNet *ServerCreatePublicNet `json:"public_net,omitempty"` } // ServerCreatePublicNet defines the public network configuration of a server. type ServerCreatePublicNet struct { - EnableIPv4 bool `json:"enable_ipv4"` - EnableIPv6 bool `json:"enable_ipv6"` - IPv4ID int `json:"ipv4,omitempty"` - IPv6ID int `json:"ipv6,omitempty"` + EnableIPv4 bool `json:"enable_ipv4"` + EnableIPv6 bool `json:"enable_ipv6"` + IPv4ID int64 `json:"ipv4,omitempty"` + IPv6ID int64 `json:"ipv6,omitempty"` } // ServerCreateFirewalls defines which Firewalls to apply when creating a Server. type ServerCreateFirewalls struct { - Firewall int `json:"firewall"` + Firewall int64 `json:"firewall"` } // ServerCreateResponse defines the schema of the response when @@ -233,7 +233,7 @@ type ServerActionCreateImageResponse struct { // create a enable_rescue server action. type ServerActionEnableRescueRequest struct { Type *string `json:"type,omitempty"` - SSHKeys []int `json:"ssh_keys,omitempty"` + SSHKeys []int64 `json:"ssh_keys,omitempty"` } // ServerActionEnableRescueResponse defines the schema of the response when @@ -364,7 +364,7 @@ type ServerActionRequestConsoleResponse struct { // ServerActionAttachToNetworkRequest defines the schema for the request to // attach a network to a server. type ServerActionAttachToNetworkRequest struct { - Network int `json:"network"` + Network int64 `json:"network"` IP *string `json:"ip,omitempty"` AliasIPs []*string `json:"alias_ips,omitempty"` } @@ -378,7 +378,7 @@ type ServerActionAttachToNetworkResponse struct { // ServerActionDetachFromNetworkRequest defines the schema for the request to // detach a network from a server. 
type ServerActionDetachFromNetworkRequest struct { - Network int `json:"network"` + Network int64 `json:"network"` } // ServerActionDetachFromNetworkResponse defines the schema of the response when @@ -390,7 +390,7 @@ type ServerActionDetachFromNetworkResponse struct { // ServerActionChangeAliasIPsRequest defines the schema for the request to // change a server's alias IPs in a network. type ServerActionChangeAliasIPsRequest struct { - Network int `json:"network"` + Network int64 `json:"network"` AliasIPs []string `json:"alias_ips"` } @@ -419,7 +419,7 @@ type ServerTimeSeriesVals struct { // ServerActionAddToPlacementGroupRequest defines the schema for the request to // add a server to a placement group. type ServerActionAddToPlacementGroupRequest struct { - PlacementGroup int `json:"placement_group"` + PlacementGroup int64 `json:"placement_group"` } // ServerActionAddToPlacementGroupResponse defines the schema of the response when diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/server_type.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/server_type.go new file mode 100644 index 000000000..0920a5ee1 --- /dev/null +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/server_type.go @@ -0,0 +1,29 @@ +package schema + +// ServerType defines the schema of a server type. +type ServerType struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Cores int `json:"cores"` + Memory float32 `json:"memory"` + Disk int `json:"disk"` + StorageType string `json:"storage_type"` + CPUType string `json:"cpu_type"` + Architecture string `json:"architecture"` + IncludedTraffic int64 `json:"included_traffic"` + Prices []PricingServerTypePrice `json:"prices"` + DeprecatableResource +} + +// ServerTypeListResponse defines the schema of the response when +// listing server types. +type ServerTypeListResponse struct { + ServerTypes []ServerType `json:"server_types"` +} + +// ServerTypeGetResponse defines the schema of the response when +// retrieving a single server type. +type ServerTypeGetResponse struct { + ServerType ServerType `json:"server_type"` +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/ssh_key.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/ssh_key.go similarity index 97% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/ssh_key.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/ssh_key.go index f230b3ddd..7e095bc5a 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/ssh_key.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/ssh_key.go @@ -4,7 +4,7 @@ import "time" // SSHKey defines the schema of a SSH key. type SSHKey struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Fingerprint string `json:"fingerprint"` PublicKey string `json:"public_key"` diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/volume.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/volume.go similarity index 95% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/volume.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/volume.go index ad745ea97..0dd391bcc 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/volume.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/schema/volume.go @@ -4,9 +4,9 @@ import "time" // Volume defines the schema of a volume. 
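Editor's note: the schema structs above consistently widen numeric IDs from `int` to `int64`, presumably because resource IDs can grow past the 32-bit range and Go's `int` is only 32 bits wide on 32-bit platforms. A small, self-contained sketch (the `network` type is invented) showing such an ID surviving a JSON round trip:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// network mirrors the post-change schema shape: IDs are int64 so values
// above 2^31-1 decode correctly regardless of the platform's int width.
type network struct {
	ID      int64   `json:"id"`
	Name    string  `json:"name"`
	Servers []int64 `json:"servers"`
}

func main() {
	raw := []byte(`{"id": 4294967296, "name": "demo", "servers": [4294967297]}`)
	var n network
	if err := json.Unmarshal(raw, &n); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", n) // {ID:4294967296 Name:demo Servers:[4294967297]}
}
```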
type Volume struct { - ID int `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` - Server *int `json:"server"` + Server *int64 `json:"server"` Status string `json:"status"` Location Location `json:"location"` Size int `json:"size"` @@ -21,7 +21,7 @@ type Volume struct { type VolumeCreateRequest struct { Name string `json:"name"` Size int `json:"size"` - Server *int `json:"server,omitempty"` + Server *int64 `json:"server,omitempty"` Location interface{} `json:"location,omitempty"` // int, string, or nil Labels *map[string]string `json:"labels,omitempty"` Automount *bool `json:"automount,omitempty"` @@ -79,7 +79,7 @@ type VolumeActionChangeProtectionResponse struct { // VolumeActionAttachVolumeRequest defines the schema of the request to // attach a volume to a server. type VolumeActionAttachVolumeRequest struct { - Server int `json:"server"` + Server int64 `json:"server"` Automount *bool `json:"automount,omitempty"` } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server.go similarity index 98% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/server.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server.go index 52ea7fa5c..2b32cdf42 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server.go @@ -12,12 +12,12 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // Server represents a server in the Hetzner Cloud. type Server struct { - ID int + ID int64 Name string Status ServerStatus Created time.Time @@ -98,7 +98,7 @@ type ServerPublicNet struct { // ServerPublicNetIPv4 represents a server's public IPv4 address. type ServerPublicNetIPv4 struct { - ID int + ID int64 IP net.IP Blocked bool DNSPtr string @@ -110,7 +110,7 @@ func (n *ServerPublicNetIPv4) IsUnspecified() bool { // ServerPublicNetIPv6 represents a Server's public IPv6 network and address. type ServerPublicNetIPv6 struct { - ID int + ID int64 IP net.IP Network *net.IPNet Blocked bool @@ -194,7 +194,7 @@ type ServerClient struct { } // GetByID retrieves a server by its ID. If the server does not exist, nil is returned. -func (c *ServerClient) GetByID(ctx context.Context, id int) (*Server, *Response, error) { +func (c *ServerClient) GetByID(ctx context.Context, id int64) (*Server, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/servers/%d", id), nil) if err != nil { return nil, nil, err @@ -226,8 +226,8 @@ func (c *ServerClient) GetByName(ctx context.Context, name string) (*Server, *Re // Get retrieves a server by its ID if the input can be parsed as an integer, otherwise it // retrieves a server by its name. If the server does not exist, nil is returned. 
func (c *ServerClient) Get(ctx context.Context, idOrName string) (*Server, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -241,7 +241,7 @@ type ServerListOpts struct { } func (l ServerListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -417,14 +417,14 @@ func (c *ServerClient) Create(ctx context.Context, opts ServerCreateOpts) (Serve } if opts.Location != nil { if opts.Location.ID != 0 { - reqBody.Location = strconv.Itoa(opts.Location.ID) + reqBody.Location = strconv.FormatInt(opts.Location.ID, 10) } else { reqBody.Location = opts.Location.Name } } if opts.Datacenter != nil { if opts.Datacenter.ID != 0 { - reqBody.Datacenter = strconv.Itoa(opts.Datacenter.ID) + reqBody.Datacenter = strconv.FormatInt(opts.Datacenter.ID, 10) } else { reqBody.Datacenter = opts.Datacenter.Name } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server_type.go similarity index 83% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server_type.go index 37ebb7f09..6fc7c1686 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/server_type.go @@ -6,12 +6,12 @@ import ( "net/url" "strconv" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // ServerType represents a server type in the Hetzner Cloud. type ServerType struct { - ID int + ID int64 Name string Description string Cores int @@ -20,7 +20,10 @@ type ServerType struct { StorageType StorageType CPUType CPUType Architecture Architecture - Pricings []ServerTypeLocationPricing + // IncludedTraffic is the free traffic per month in bytes + IncludedTraffic int64 + Pricings []ServerTypeLocationPricing + DeprecatableResource } // StorageType specifies the type of storage. @@ -51,7 +54,7 @@ type ServerTypeClient struct { } // GetByID retrieves a server type by its ID. If the server type does not exist, nil is returned. -func (c *ServerTypeClient) GetByID(ctx context.Context, id int) (*ServerType, *Response, error) { +func (c *ServerTypeClient) GetByID(ctx context.Context, id int64) (*ServerType, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/server_types/%d", id), nil) if err != nil { return nil, nil, err @@ -83,8 +86,8 @@ func (c *ServerTypeClient) GetByName(ctx context.Context, name string) (*ServerT // Get retrieves a server type by its ID if the input can be parsed as an integer, otherwise it // retrieves a server type by its name. If the server type does not exist, nil is returned. 
func (c *ServerTypeClient) Get(ctx context.Context, idOrName string) (*ServerType, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -97,7 +100,7 @@ type ServerTypeListOpts struct { } func (l ServerTypeListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } @@ -132,10 +135,12 @@ func (c *ServerTypeClient) List(ctx context.Context, opts ServerTypeListOpts) ([ // All returns all server types. func (c *ServerTypeClient) All(ctx context.Context) ([]*ServerType, error) { - allServerTypes := []*ServerType{} + return c.AllWithOpts(ctx, ServerTypeListOpts{ListOpts: ListOpts{PerPage: 50}}) +} - opts := ServerTypeListOpts{} - opts.PerPage = 50 +// AllWithOpts returns all server types for the given options. +func (c *ServerTypeClient) AllWithOpts(ctx context.Context, opts ServerTypeListOpts) ([]*ServerType, error) { + var allServerTypes []*ServerType err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/ssh_key.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/ssh_key.go similarity index 95% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/ssh_key.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/ssh_key.go index f450c8b92..45d4558f9 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/ssh_key.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/ssh_key.go @@ -10,12 +10,12 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // SSHKey represents a SSH key in the Hetzner Cloud. type SSHKey struct { - ID int + ID int64 Name string Fingerprint string PublicKey string @@ -29,7 +29,7 @@ type SSHKeyClient struct { } // GetByID retrieves a SSH key by its ID. If the SSH key does not exist, nil is returned. -func (c *SSHKeyClient) GetByID(ctx context.Context, id int) (*SSHKey, *Response, error) { +func (c *SSHKeyClient) GetByID(ctx context.Context, id int64) (*SSHKey, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/ssh_keys/%d", id), nil) if err != nil { return nil, nil, err @@ -70,8 +70,8 @@ func (c *SSHKeyClient) GetByFingerprint(ctx context.Context, fingerprint string) // Get retrieves a SSH key by its ID if the input can be parsed as an integer, otherwise it // retrieves a SSH key by its name. If the SSH key does not exist, nil is returned. 
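Editor's note: the server_type.go hunk above refactors `All` into a thin wrapper around a new `AllWithOpts`, which drives a per-page callback with a default page size of 50. The sketch below reproduces that delegation-plus-pagination shape with invented names (`allWithOpts`, `fetch`); it is not the hcloud-go client code.

```go
package main

import "fmt"

// allWithOpts collects every item by calling fetch page by page until the
// callback reports that no further pages remain.
func allWithOpts(perPage int, fetch func(page, perPage int) (items []string, more bool)) []string {
	var all []string
	for page := 1; ; page++ {
		items, more := fetch(page, perPage)
		all = append(all, items...)
		if !more {
			return all
		}
	}
}

func main() {
	data := []string{"cx11", "cx21", "cx31", "cx41", "cx51"}
	result := allWithOpts(2, func(page, perPage int) ([]string, bool) {
		start := (page - 1) * perPage
		end := start + perPage
		if end > len(data) {
			end = len(data)
		}
		return data[start:end], end < len(data)
	})
	fmt.Println(result) // [cx11 cx21 cx31 cx41 cx51]
}
```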
func (c *SSHKeyClient) Get(ctx context.Context, idOrName string) (*SSHKey, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -85,7 +85,7 @@ type SSHKeyListOpts struct { } func (l SSHKeyListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } diff --git a/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/testing.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/testing.go new file mode 100644 index 000000000..63cd92f7b --- /dev/null +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/testing.go @@ -0,0 +1,16 @@ +package hcloud + +import ( + "testing" + "time" +) + +func mustParseTime(t *testing.T, value string) time.Time { + t.Helper() + + ts, err := time.Parse(time.RFC3339, value) + if err != nil { + t.Fatalf("parse time: value %v: %v", value, err) + } + return ts +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/volume.go b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/volume.go similarity index 97% rename from vendor/github.com/hetznercloud/hcloud-go/hcloud/volume.go rename to vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/volume.go index f939d89bb..b955384ee 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/volume.go +++ b/vendor/github.com/hetznercloud/hcloud-go/v2/hcloud/volume.go @@ -10,12 +10,12 @@ import ( "strconv" "time" - "github.com/hetznercloud/hcloud-go/hcloud/schema" + "github.com/hetznercloud/hcloud-go/v2/hcloud/schema" ) // Volume represents a volume in the Hetzner Cloud. type Volume struct { - ID int + ID int64 Name string Status VolumeStatus Server *Server @@ -49,7 +49,7 @@ const ( ) // GetByID retrieves a volume by its ID. If the volume does not exist, nil is returned. -func (c *VolumeClient) GetByID(ctx context.Context, id int) (*Volume, *Response, error) { +func (c *VolumeClient) GetByID(ctx context.Context, id int64) (*Volume, *Response, error) { req, err := c.client.NewRequest(ctx, "GET", fmt.Sprintf("/volumes/%d", id), nil) if err != nil { return nil, nil, err @@ -81,8 +81,8 @@ func (c *VolumeClient) GetByName(ctx context.Context, name string) (*Volume, *Re // Get retrieves a volume by its ID if the input can be parsed as an integer, otherwise it // retrieves a volume by its name. If the volume does not exist, nil is returned. func (c *VolumeClient) Get(ctx context.Context, idOrName string) (*Volume, *Response, error) { - if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + if id, err := strconv.ParseInt(idOrName, 10, 64); err == nil { + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } @@ -96,7 +96,7 @@ type VolumeListOpts struct { } func (l VolumeListOpts) values() url.Values { - vals := l.ListOpts.values() + vals := l.ListOpts.Values() if l.Name != "" { vals.Add("name", l.Name) } diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md new file mode 100644 index 000000000..0a1ff9f94 --- /dev/null +++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! ❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. 
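Editor's note: the new `testing.go` above adds `mustParseTime`, a fail-fast test helper. The sketch below shows the same `t.Helper()` idiom for a different type; `mustParseDuration` is invented for the example and is not part of the vendored package.

```go
package example

import (
	"testing"
	"time"
)

// mustParseDuration mirrors the helper style of mustParseTime: t.Helper()
// makes a failure point at the calling line, and the helper aborts the
// test immediately instead of returning an error.
func mustParseDuration(t *testing.T, value string) time.Duration {
	t.Helper()
	d, err := time.ParseDuration(value)
	if err != nil {
		t.Fatalf("parse duration: value %v: %v", value, err)
	}
	return d
}

func TestMustParseDuration(t *testing.T) {
	if got := mustParseDuration(t, "90s"); got != 90*time.Second {
		t.Fatalf("unexpected duration: %v", got)
	}
}
```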
Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it is governed by the +[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). + +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). +- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. 
+- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. 
+- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 7e6f7aeee..ffbbb62c7 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,17 +1,20 @@ # Mergo - -[![GoDoc][3]][4] [![GitHub release][5]][6] [![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] +[![Test status][1]][2] +[![OpenSSF Scorecard][21]][22] +[![OpenSSF Best Practices][19]][20] +[![Coverage status][9]][10] [![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] +[![FOSSA status][13]][14] + +[![GoDoc][3]][4] [![Become my sponsor][15]][16] +[![Tidelift][17]][18] -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo +[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master +[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo [5]: https://img.shields.io/github/release/imdario/mergo.svg @@ -26,6 +29,12 @@ [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield [15]: https://img.shields.io/github/sponsors/imdario [16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -55,7 +64,6 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont ### Mergo in the wild -- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) @@ -231,5 +239,4 @@ Written by [Dario Castañé](http://dario.im). [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
- [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md new file mode 100644 index 000000000..a5de61f77 --- /dev/null +++ b/vendor/github.com/imdario/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index a13a7ee46..b50d5c2a4 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } } // Remember, remember... - visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } zeroValue := reflect.Value{} switch dst.Kind() { @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } @@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8b4e2f47a..0ef9b2138 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool { } type Config struct { + Transformers Transformers Overwrite bool + ShouldNotDereference bool AppendSlice bool TypeCheck bool - Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool sliceDeepCopy bool @@ -76,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { @@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { dst.Set(src) } } @@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if src.Kind() != reflect.Map { - if overwrite { + if overwrite && dst.CanSet() { dst.Set(src) } return @@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } @@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } } - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + + // Ensure that all keys in dst are deleted if they are not in src. 
+ if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } case reflect.Slice: if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if src.Kind() != reflect.Interface { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } } } else if dst.Elem().Type() == src.Type() { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { @@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } break @@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co break } default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) if mustSet { if dst.CanSet() { dst.Set(src) @@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + // WithAppendSlice will make merge append slices instead of overwriting it. 
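The new `WithoutDereference` option above changes how pointer fields are judged empty during a merge: a non-nil pointer is kept as-is instead of being dereferenced and possibly treated as empty. A minimal, self-contained sketch of the effect, assuming the vendored import path `github.com/imdario/mergo`; the `Settings` type and values are illustrative, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Settings struct {
	// Retries is a pointer so "explicitly set to 0" can be told apart
	// from "not set at all".
	Retries *int
}

func main() {
	zero, three := 0, 3

	dst := Settings{Retries: &zero}  // explicitly configured to 0
	src := Settings{Retries: &three} // defaults being merged in

	// With WithoutDereference, the non-nil dst pointer counts as
	// non-empty, so the explicit 0 survives the merge. Without the
	// option, the zero-valued *int would be dereferenced, judged
	// empty, and replaced by 3.
	if err := mergo.Merge(&dst, src, mergo.WithoutDereference); err != nil {
		panic(err)
	}
	fmt.Println(*dst.Retries) // 0
}
```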
func WithAppendSlice(config *Config) { config.AppendSlice = true @@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) { func merge(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 9fe362d47..0a721e2d8 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,7 +20,7 @@ var ( ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") + ErrNonPointerArgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -28,13 +28,13 @@ var ( // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { - ptr uintptr typ reflect.Type next *visit + ptr uintptr } // From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 @@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool { if v.IsNil() { return true } - return isEmptyValue(v.Elem()) + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false case reflect.Func: return v.IsNil() case reflect.Invalid: diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/README.md b/vendor/github.com/ionos-cloud/sdk-go/v6/README.md index f020bdf43..48ab1f388 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/README.md +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/README.md @@ -352,7 +352,6 @@ KubernetesApi | [**K8sPut**](docs/api/KubernetesApi.md#k8sput) | **Put** /k8s/{k KubernetesApi | [**K8sVersionsDefaultGet**](docs/api/KubernetesApi.md#k8sversionsdefaultget) | **Get** /k8s/versions/default | Get Default Kubernetes Version KubernetesApi | [**K8sVersionsGet**](docs/api/KubernetesApi.md#k8sversionsget) | **Get** /k8s/versions | Get Kubernetes Versions LANsApi | [**DatacentersLansDelete**](docs/api/LANsApi.md#datacenterslansdelete) | **Delete** /datacenters/{datacenterId}/lans/{lanId} | Delete LANs -LANsApi | [**DatacentersLansEnableIpv6**](docs/api/LANsApi.md#datacenterslansenableipv6) | **Post** /datacenters/{datacenterId}/lans/enable-ipv6 | Enable IPv6 in the current Virtual Datacenter LANsApi | [**DatacentersLansFindById**](docs/api/LANsApi.md#datacenterslansfindbyid) | **Get** /datacenters/{datacenterId}/lans/{lanId} | Retrieve LANs LANsApi | [**DatacentersLansGet**](docs/api/LANsApi.md#datacenterslansget) | **Get** /datacenters/{datacenterId}/lans | List LANs LANsApi | [**DatacentersLansNicsFindById**](docs/api/LANsApi.md#datacenterslansnicsfindbyid) | **Get** /datacenters/{datacenterId}/lans/{lanId}/nics/{nicId} | Retrieve attached NICs @@ -620,7 +619,9 @@ All URIs are relative to *https://api.ionos.com/cloudapi/v6* - [Lan](docs/models/Lan) - [LanEntities](docs/models/LanEntities) - [LanNics](docs/models/LanNics) + - [LanPost](docs/models/LanPost) - [LanProperties](docs/models/LanProperties) + - [LanPropertiesPost](docs/models/LanPropertiesPost) - [Lans](docs/models/Lans) - 
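The updated `client.go` above replaces an unconditional `time.Sleep` in the retry back-off with a timer plus a `select` on the request context, so a cancelled request stops waiting immediately. A generic sketch of that pattern under the same assumptions as the diff; `sleepCtx` is an illustrative name, not the SDK's own (the SDK keeps this logic in its unexported `backOff` method):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// sleepCtx waits for d or until ctx is cancelled, whichever comes first.
func sleepCtx(ctx context.Context, d time.Duration) {
	if d <= 0 {
		return
	}
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-ctx.Done(): // caller gave up; abandon the back-off
	case <-timer.C: // back-off elapsed; retry can proceed
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	start := time.Now()
	sleepCtx(ctx, 5*time.Second) // returns after ~100ms, not 5s
	fmt.Println("waited", time.Since(start).Round(time.Millisecond))
}
```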
[Loadbalancer](docs/models/Loadbalancer) - [LoadbalancerEntities](docs/models/LoadbalancerEntities) diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_lans.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_lans.go index 07560c528..75e864a53 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_lans.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_lans.go @@ -192,167 +192,6 @@ func (a *LANsApiService) DatacentersLansDeleteExecute(r ApiDatacentersLansDelete return localVarAPIResponse, nil } -type ApiDatacentersLansEnableIpv6Request struct { - ctx _context.Context - ApiService *LANsApiService - datacenterId string - pretty *bool - depth *int32 - xContractNumber *int32 -} - -func (r ApiDatacentersLansEnableIpv6Request) Pretty(pretty bool) ApiDatacentersLansEnableIpv6Request { - r.pretty = &pretty - return r -} -func (r ApiDatacentersLansEnableIpv6Request) Depth(depth int32) ApiDatacentersLansEnableIpv6Request { - r.depth = &depth - return r -} -func (r ApiDatacentersLansEnableIpv6Request) XContractNumber(xContractNumber int32) ApiDatacentersLansEnableIpv6Request { - r.xContractNumber = &xContractNumber - return r -} - -func (r ApiDatacentersLansEnableIpv6Request) Execute() (*APIResponse, error) { - return r.ApiService.DatacentersLansEnableIpv6Execute(r) -} - -/* - * DatacentersLansEnableIpv6 Enable IPv6 in the current Virtual Datacenter - * Enable IPv6 for all NICs in the current Virtual Datacenter. - * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - * @param datacenterId The unique ID of the data center. - * @return ApiDatacentersLansEnableIpv6Request - */ -func (a *LANsApiService) DatacentersLansEnableIpv6(ctx _context.Context, datacenterId string) ApiDatacentersLansEnableIpv6Request { - return ApiDatacentersLansEnableIpv6Request{ - ApiService: a, - ctx: ctx, - datacenterId: datacenterId, - } -} - -/* - * Execute executes the request - */ -func (a *LANsApiService) DatacentersLansEnableIpv6Execute(r ApiDatacentersLansEnableIpv6Request) (*APIResponse, error) { - var ( - localVarHTTPMethod = _nethttp.MethodPost - localVarPostBody interface{} - localVarFormFileName string - localVarFileName string - localVarFileBytes []byte - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "LANsApiService.DatacentersLansEnableIpv6") - if err != nil { - return nil, GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/datacenters/{datacenterId}/lans/enable-ipv6" - localVarPath = strings.Replace(localVarPath, "{"+"datacenterId"+"}", _neturl.PathEscape(parameterToString(r.datacenterId, "")), -1) - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := _neturl.Values{} - localVarFormParams := _neturl.Values{} - - if r.pretty != nil { - localVarQueryParams.Add("pretty", parameterToString(*r.pretty, "")) - } else { - defaultQueryParam := a.client.cfg.DefaultQueryParams.Get("pretty") - if defaultQueryParam == "" { - localVarQueryParams.Add("pretty", parameterToString(true, "")) - } - } - if r.depth != nil { - localVarQueryParams.Add("depth", parameterToString(*r.depth, "")) - } else { - defaultQueryParam := a.client.cfg.DefaultQueryParams.Get("depth") - if defaultQueryParam == "" { - localVarQueryParams.Add("depth", parameterToString(0, "")) - } - } - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{} - - // set Content-Type header - localVarHTTPContentType := 
selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{"*/*"} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - if r.xContractNumber != nil { - localVarHeaderParams["X-Contract-Number"] = parameterToString(*r.xContractNumber, "") - } - if r.ctx != nil { - // API Key Authentication - if auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok { - if apiKey, ok := auth["Token Authentication"]; ok { - var key string - if apiKey.Prefix != "" { - key = apiKey.Prefix + " " + apiKey.Key - } else { - key = apiKey.Key - } - localVarHeaderParams["Authorization"] = key - } - } - } - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes) - if err != nil { - return nil, err - } - - localVarHTTPResponse, httpRequestTime, err := a.client.callAPI(req) - - localVarAPIResponse := &APIResponse{ - Response: localVarHTTPResponse, - Method: localVarHTTPMethod, - RequestURL: localVarPath, - RequestTime: httpRequestTime, - Operation: "DatacentersLansEnableIpv6", - } - - if err != nil || localVarHTTPResponse == nil { - return localVarAPIResponse, err - } - - localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarAPIResponse.Payload = localVarBody - if err != nil { - return localVarAPIResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := GenericOpenAPIError{ - statusCode: localVarHTTPResponse.StatusCode, - body: localVarBody, - error: fmt.Sprintf(FormatStringErr, localVarHTTPResponse.Status, string(localVarBody)), - } - var v Error - err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) - if err != nil { - newErr.error = fmt.Sprintf(FormatStringErr, localVarHTTPResponse.Status, err.Error()) - return localVarAPIResponse, newErr - } - newErr.model = v - return localVarAPIResponse, newErr - } - - return localVarAPIResponse, nil -} - type ApiDatacentersLansFindByIdRequest struct { ctx _context.Context ApiService *LANsApiService @@ -1563,13 +1402,13 @@ type ApiDatacentersLansPostRequest struct { ctx _context.Context ApiService *LANsApiService datacenterId string - lan *Lan + lan *LanPost pretty *bool depth *int32 xContractNumber *int32 } -func (r ApiDatacentersLansPostRequest) Lan(lan Lan) ApiDatacentersLansPostRequest { +func (r ApiDatacentersLansPostRequest) Lan(lan LanPost) ApiDatacentersLansPostRequest { r.lan = &lan return r } @@ -1586,7 +1425,7 @@ func (r ApiDatacentersLansPostRequest) XContractNumber(xContractNumber int32) Ap return r } -func (r ApiDatacentersLansPostRequest) Execute() (Lan, *APIResponse, error) { +func (r ApiDatacentersLansPostRequest) Execute() (LanPost, *APIResponse, error) { return r.ApiService.DatacentersLansPostExecute(r) } @@ -1607,16 +1446,16 @@ func (a *LANsApiService) DatacentersLansPost(ctx _context.Context, datacenterId /* * Execute executes the request - * @return Lan + * @return LanPost */ -func (a *LANsApiService) DatacentersLansPostExecute(r ApiDatacentersLansPostRequest) (Lan, *APIResponse, error) { +func (a *LANsApiService) DatacentersLansPostExecute(r 
ApiDatacentersLansPostRequest) (LanPost, *APIResponse, error) { var ( localVarHTTPMethod = _nethttp.MethodPost localVarPostBody interface{} localVarFormFileName string localVarFileName string localVarFileBytes []byte - localVarReturnValue Lan + localVarReturnValue LanPost ) localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "LANsApiService.DatacentersLansPost") diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/client.go b/vendor/github.com/ionos-cloud/sdk-go/v6/client.go index 0bbdf2a6d..4b5a7e0e3 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/client.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/client.go @@ -53,7 +53,7 @@ const ( RequestStatusFailed = "FAILED" RequestStatusDone = "DONE" - Version = "6.1.6" + Version = "6.1.8" ) // Constants for APIs @@ -370,6 +370,9 @@ func (c *APIClient) callAPI(request *http.Request) (*http.Response, time.Duratio case http.StatusServiceUnavailable, http.StatusGatewayTimeout, http.StatusBadGateway: + if request.Method == http.MethodPost { + return resp, httpRequestTime, err + } backoffTime = c.GetConfig().WaitTime case http.StatusTooManyRequests: @@ -393,21 +396,32 @@ func (c *APIClient) callAPI(request *http.Request) (*http.Response, time.Duratio } break } else { - c.backOff(backoffTime) + c.backOff(request.Context(), backoffTime) } } return resp, httpRequestTime, err } -func (c *APIClient) backOff(t time.Duration) { +func (c *APIClient) backOff(ctx context.Context, t time.Duration) { if t > c.GetConfig().MaxWaitTime { t = c.GetConfig().MaxWaitTime } if c.cfg.Debug || c.cfg.LogLevel.Satisfies(Debug) { c.cfg.Logger.Printf(" sleeping %s before retrying request\n", t.String()) } - time.Sleep(t) + + if t <= 0 { + return + } + + timer := time.NewTimer(t) + defer timer.Stop() + + select { + case <-ctx.Done(): + case <-timer.C: + } } // Allow modification of underlying config for alternate implementations and testing diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/configuration.go b/vendor/github.com/ionos-cloud/sdk-go/v6/configuration.go index d6ef150c2..6845f4a66 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/configuration.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/configuration.go @@ -130,7 +130,7 @@ func NewConfiguration(username, password, token, hostUrl string) *Configuration cfg := &Configuration{ DefaultHeader: make(map[string]string), DefaultQueryParams: url.Values{}, - UserAgent: "ionos-cloud-sdk-go/v6.1.6", + UserAgent: "ionos-cloud-sdk-go/v6.1.8", Debug: false, Username: username, Password: password, diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer.go index a63fe3431..61cf51188 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer.go @@ -16,15 +16,15 @@ import ( // ApplicationLoadBalancer struct for ApplicationLoadBalancer type ApplicationLoadBalancer struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Entities *ApplicationLoadBalancerEntities `json:"entities,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. 
+ Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *ApplicationLoadBalancerProperties `json:"properties"` - Entities *ApplicationLoadBalancerEntities `json:"entities,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewApplicationLoadBalancer instantiates a new ApplicationLoadBalancer object @@ -47,114 +47,114 @@ func NewApplicationLoadBalancerWithDefaults() *ApplicationLoadBalancer { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancer) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancer) GetEntities() *ApplicationLoadBalancerEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancer) GetIdOk() (*string, bool) { +func (o *ApplicationLoadBalancer) GetEntitiesOk() (*ApplicationLoadBalancerEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *ApplicationLoadBalancer) SetId(v string) { +// SetEntities sets field value +func (o *ApplicationLoadBalancer) SetEntities(v ApplicationLoadBalancerEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *ApplicationLoadBalancer) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *ApplicationLoadBalancer) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *ApplicationLoadBalancer) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancer) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancer) GetTypeOk() (*Type, bool) { +func (o *ApplicationLoadBalancer) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *ApplicationLoadBalancer) SetType(v Type) { +// SetHref sets field value +func (o *ApplicationLoadBalancer) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *ApplicationLoadBalancer) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancer) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancer) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancer) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancer) GetHrefOk() (*string, bool) { +func (o *ApplicationLoadBalancer) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *ApplicationLoadBalancer) SetHref(v string) { +// SetId sets field value +func (o *ApplicationLoadBalancer) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *ApplicationLoadBalancer) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *ApplicationLoadBalancer) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *ApplicationLoadBalancer) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancer) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *ApplicationLoadBalancer) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for ApplicationLoadBalancerProperties will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancer) GetProperties() *ApplicationLoadBalancerProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *ApplicationLoadBalancer) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for ApplicationLoadBalancerEntities will be returned -func (o *ApplicationLoadBalancer) GetEntities() *ApplicationLoadBalancerEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancer) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancer) GetEntitiesOk() (*ApplicationLoadBalancerEntities, bool) { +func (o *ApplicationLoadBalancer) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *ApplicationLoadBalancer) SetEntities(v ApplicationLoadBalancerEntities) { +// SetType sets field value +func (o *ApplicationLoadBalancer) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. 
-func (o *ApplicationLoadBalancer) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. +func (o *ApplicationLoadBalancer) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *ApplicationLoadBalancer) HasEntities() bool { func (o ApplicationLoadBalancer) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_entities.go index d8cd1a152..425fceaa1 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_entities.go @@ -38,7 +38,7 @@ func NewApplicationLoadBalancerEntitiesWithDefaults() *ApplicationLoadBalancerEn } // GetForwardingrules returns the Forwardingrules field value -// If the value is explicit nil, the zero value for ApplicationLoadBalancerForwardingRules will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerEntities) GetForwardingrules() *ApplicationLoadBalancerForwardingRules { if o == nil { return nil @@ -80,6 +80,7 @@ func (o ApplicationLoadBalancerEntities) MarshalJSON() ([]byte, error) { if o.Forwardingrules != nil { toSerialize["forwardingrules"] = o.Forwardingrules } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule.go index 34c30dfc6..e9e1bd5c9 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule.go @@ -16,14 +16,14 @@ import ( // ApplicationLoadBalancerForwardingRule struct for ApplicationLoadBalancerForwardingRule type ApplicationLoadBalancerForwardingRule struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *ApplicationLoadBalancerForwardingRuleProperties `json:"properties"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewApplicationLoadBalancerForwardingRule instantiates a new ApplicationLoadBalancerForwardingRule object @@ -46,190 +46,190 @@ func NewApplicationLoadBalancerForwardingRuleWithDefaults() *ApplicationLoadBala return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRule) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetIdOk() (*string, bool) { +func (o *ApplicationLoadBalancerForwardingRule) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *ApplicationLoadBalancerForwardingRule) SetId(v string) { +// SetHref sets field value +func (o *ApplicationLoadBalancerForwardingRule) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRule) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRule) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRule) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetTypeOk() (*Type, bool) { +func (o *ApplicationLoadBalancerForwardingRule) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *ApplicationLoadBalancerForwardingRule) SetType(v Type) { +// SetId sets field value +func (o *ApplicationLoadBalancerForwardingRule) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRule) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerForwardingRule) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRule) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetHrefOk() (*string, bool) { +func (o *ApplicationLoadBalancerForwardingRule) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *ApplicationLoadBalancerForwardingRule) SetHref(v string) { +// SetMetadata sets field value +func (o *ApplicationLoadBalancerForwardingRule) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRule) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRule) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRule) GetProperties() *ApplicationLoadBalancerForwardingRuleProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *ApplicationLoadBalancerForwardingRule) GetPropertiesOk() (*ApplicationLoadBalancerForwardingRuleProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *ApplicationLoadBalancerForwardingRule) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *ApplicationLoadBalancerForwardingRule) SetProperties(v ApplicationLoadBalancerForwardingRuleProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRule) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerForwardingRule) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for ApplicationLoadBalancerForwardingRuleProperties will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetProperties() *ApplicationLoadBalancerForwardingRuleProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRule) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRule) GetPropertiesOk() (*ApplicationLoadBalancerForwardingRuleProperties, bool) { +func (o *ApplicationLoadBalancerForwardingRule) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *ApplicationLoadBalancerForwardingRule) SetProperties(v ApplicationLoadBalancerForwardingRuleProperties) { +// SetType sets field value +func (o *ApplicationLoadBalancerForwardingRule) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRule) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRule) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *ApplicationLoadBalancerForwardingRule) HasProperties() bool { func (o ApplicationLoadBalancerForwardingRule) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_properties.go index f73da694f..b5adc750a 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_properties.go @@ -16,33 +16,33 @@ import ( // ApplicationLoadBalancerForwardingRuleProperties struct for ApplicationLoadBalancerForwardingRuleProperties type ApplicationLoadBalancerForwardingRuleProperties struct { - // The name of the Application Load Balancer forwarding rule. - Name *string `json:"name"` - // The balancing protocol. - Protocol *string `json:"protocol"` + // The maximum time in milliseconds to wait for the client to acknowledge or send data; default is 50,000 (50 seconds). + ClientTimeout *int32 `json:"clientTimeout,omitempty"` + // An array of items in the collection. 
The original order of rules is preserved during processing, except that rules of the 'FORWARD' type are processed after the rules with other defined actions. The relative order of the 'FORWARD' type rules is also preserved during the processing. + HttpRules *[]ApplicationLoadBalancerHttpRule `json:"httpRules,omitempty"` // The listening (inbound) IP. ListenerIp *string `json:"listenerIp"` // The listening (inbound) port number; the valid range is 1 to 65535. ListenerPort *int32 `json:"listenerPort"` - // The maximum time in milliseconds to wait for the client to acknowledge or send data; default is 50,000 (50 seconds). - ClientTimeout *int32 `json:"clientTimeout,omitempty"` + // The name of the Application Load Balancer forwarding rule. + Name *string `json:"name"` + // The balancing protocol. + Protocol *string `json:"protocol"` // Array of items in the collection. ServerCertificates *[]string `json:"serverCertificates,omitempty"` - // An array of items in the collection. The original order of rules is preserved during processing, except that rules of the 'FORWARD' type are processed after the rules with other defined actions. The relative order of the 'FORWARD' type rules is also preserved during the processing. - HttpRules *[]ApplicationLoadBalancerHttpRule `json:"httpRules,omitempty"` } // NewApplicationLoadBalancerForwardingRuleProperties instantiates a new ApplicationLoadBalancerForwardingRuleProperties object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewApplicationLoadBalancerForwardingRuleProperties(name string, protocol string, listenerIp string, listenerPort int32) *ApplicationLoadBalancerForwardingRuleProperties { +func NewApplicationLoadBalancerForwardingRuleProperties(listenerIp string, listenerPort int32, name string, protocol string) *ApplicationLoadBalancerForwardingRuleProperties { this := ApplicationLoadBalancerForwardingRuleProperties{} - this.Name = &name - this.Protocol = &protocol this.ListenerIp = &listenerIp this.ListenerPort = &listenerPort + this.Name = &name + this.Protocol = &protocol return &this } @@ -55,76 +55,76 @@ func NewApplicationLoadBalancerForwardingRulePropertiesWithDefaults() *Applicati return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetName() *string { +// GetClientTimeout returns the ClientTimeout field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetClientTimeout() *int32 { if o == nil { return nil } - return o.Name + return o.ClientTimeout } -// GetNameOk returns a tuple with the Name field value +// GetClientTimeoutOk returns a tuple with the ClientTimeout field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetNameOk() (*string, bool) { +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetClientTimeoutOk() (*int32, bool) { if o == nil { return nil, false } - return o.Name, true + return o.ClientTimeout, true } -// SetName sets field value -func (o *ApplicationLoadBalancerForwardingRuleProperties) SetName(v string) { +// SetClientTimeout sets field value +func (o *ApplicationLoadBalancerForwardingRuleProperties) SetClientTimeout(v int32) { - o.Name = &v + o.ClientTimeout = &v } -// HasName returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRuleProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasClientTimeout returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRuleProperties) HasClientTimeout() bool { + if o != nil && o.ClientTimeout != nil { return true } return false } -// GetProtocol returns the Protocol field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetProtocol() *string { +// GetHttpRules returns the HttpRules field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetHttpRules() *[]ApplicationLoadBalancerHttpRule { if o == nil { return nil } - return o.Protocol + return o.HttpRules } -// GetProtocolOk returns a tuple with the Protocol field value +// GetHttpRulesOk returns a tuple with the HttpRules field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetProtocolOk() (*string, bool) { +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetHttpRulesOk() (*[]ApplicationLoadBalancerHttpRule, bool) { if o == nil { return nil, false } - return o.Protocol, true + return o.HttpRules, true } -// SetProtocol sets field value -func (o *ApplicationLoadBalancerForwardingRuleProperties) SetProtocol(v string) { +// SetHttpRules sets field value +func (o *ApplicationLoadBalancerForwardingRuleProperties) SetHttpRules(v []ApplicationLoadBalancerHttpRule) { - o.Protocol = &v + o.HttpRules = &v } -// HasProtocol returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRuleProperties) HasProtocol() bool { - if o != nil && o.Protocol != nil { +// HasHttpRules returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerForwardingRuleProperties) HasHttpRules() bool { + if o != nil && o.HttpRules != nil { return true } @@ -132,7 +132,7 @@ func (o *ApplicationLoadBalancerForwardingRuleProperties) HasProtocol() bool { } // GetListenerIp returns the ListenerIp field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerForwardingRuleProperties) GetListenerIp() *string { if o == nil { return nil @@ -170,7 +170,7 @@ func (o *ApplicationLoadBalancerForwardingRuleProperties) HasListenerIp() bool { } // GetListenerPort returns the ListenerPort field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerForwardingRuleProperties) GetListenerPort() *int32 { if o == nil { return nil @@ -207,114 +207,114 @@ func (o *ApplicationLoadBalancerForwardingRuleProperties) HasListenerPort() bool return false } -// GetClientTimeout returns the ClientTimeout field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetClientTimeout() *int32 { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetName() *string { if o == nil { return nil } - return o.ClientTimeout + return o.Name } -// GetClientTimeoutOk returns a tuple with the ClientTimeout field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetClientTimeoutOk() (*int32, bool) { +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.ClientTimeout, true + return o.Name, true } -// SetClientTimeout sets field value -func (o *ApplicationLoadBalancerForwardingRuleProperties) SetClientTimeout(v int32) { +// SetName sets field value +func (o *ApplicationLoadBalancerForwardingRuleProperties) SetName(v string) { - o.ClientTimeout = &v + o.Name = &v } -// HasClientTimeout returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRuleProperties) HasClientTimeout() bool { - if o != nil && o.ClientTimeout != nil { +// HasName returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRuleProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetServerCertificates returns the ServerCertificates field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetServerCertificates() *[]string { +// GetProtocol returns the Protocol field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetProtocol() *string { if o == nil { return nil } - return o.ServerCertificates + return o.Protocol } -// GetServerCertificatesOk returns a tuple with the ServerCertificates field value +// GetProtocolOk returns a tuple with the Protocol field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetServerCertificatesOk() (*[]string, bool) { +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetProtocolOk() (*string, bool) { if o == nil { return nil, false } - return o.ServerCertificates, true + return o.Protocol, true } -// SetServerCertificates sets field value -func (o *ApplicationLoadBalancerForwardingRuleProperties) SetServerCertificates(v []string) { +// SetProtocol sets field value +func (o *ApplicationLoadBalancerForwardingRuleProperties) SetProtocol(v string) { - o.ServerCertificates = &v + o.Protocol = &v } -// HasServerCertificates returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRuleProperties) HasServerCertificates() bool { - if o != nil && o.ServerCertificates != nil { +// HasProtocol returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRuleProperties) HasProtocol() bool { + if o != nil && o.Protocol != nil { return true } return false } -// GetHttpRules returns the HttpRules field value -// If the value is explicit nil, the zero value for []ApplicationLoadBalancerHttpRule will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetHttpRules() *[]ApplicationLoadBalancerHttpRule { +// GetServerCertificates returns the ServerCertificates field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetServerCertificates() *[]string { if o == nil { return nil } - return o.HttpRules + return o.ServerCertificates } -// GetHttpRulesOk returns a tuple with the HttpRules field value +// GetServerCertificatesOk returns a tuple with the ServerCertificates field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRuleProperties) GetHttpRulesOk() (*[]ApplicationLoadBalancerHttpRule, bool) { +func (o *ApplicationLoadBalancerForwardingRuleProperties) GetServerCertificatesOk() (*[]string, bool) { if o == nil { return nil, false } - return o.HttpRules, true + return o.ServerCertificates, true } -// SetHttpRules sets field value -func (o *ApplicationLoadBalancerForwardingRuleProperties) SetHttpRules(v []ApplicationLoadBalancerHttpRule) { +// SetServerCertificates sets field value +func (o *ApplicationLoadBalancerForwardingRuleProperties) SetServerCertificates(v []string) { - o.HttpRules = &v + o.ServerCertificates = &v } -// HasHttpRules returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRuleProperties) HasHttpRules() bool { - if o != nil && o.HttpRules != nil { +// HasServerCertificates returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerForwardingRuleProperties) HasServerCertificates() bool { + if o != nil && o.ServerCertificates != nil { return true } @@ -323,27 +323,34 @@ func (o *ApplicationLoadBalancerForwardingRuleProperties) HasHttpRules() bool { func (o ApplicationLoadBalancerForwardingRuleProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.ClientTimeout != nil { + toSerialize["clientTimeout"] = o.ClientTimeout } - if o.Protocol != nil { - toSerialize["protocol"] = o.Protocol + + if o.HttpRules != nil { + toSerialize["httpRules"] = o.HttpRules } + if o.ListenerIp != nil { toSerialize["listenerIp"] = o.ListenerIp } + if o.ListenerPort != nil { toSerialize["listenerPort"] = o.ListenerPort } - if o.ClientTimeout != nil { - toSerialize["clientTimeout"] = o.ClientTimeout + + if o.Name != nil { + toSerialize["name"] = o.Name + } + + if o.Protocol != nil { + toSerialize["protocol"] = o.Protocol } + if o.ServerCertificates != nil { toSerialize["serverCertificates"] = o.ServerCertificates } - if o.HttpRules != nil { - toSerialize["httpRules"] = o.HttpRules - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_put.go index 0328b229c..d5e6f649a 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_put.go @@ -16,13 +16,13 @@ import ( // ApplicationLoadBalancerForwardingRulePut struct for ApplicationLoadBalancerForwardingRulePut type ApplicationLoadBalancerForwardingRulePut struct { + // The URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *ApplicationLoadBalancerForwardingRuleProperties `json:"properties"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` - Properties *ApplicationLoadBalancerForwardingRuleProperties `json:"properties"` } // NewApplicationLoadBalancerForwardingRulePut instantiates a new ApplicationLoadBalancerForwardingRulePut object @@ -45,152 +45,152 @@ func NewApplicationLoadBalancerForwardingRulePutWithDefaults() *ApplicationLoadB return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerForwardingRulePut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRulePut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRulePut) GetIdOk() (*string, bool) { +func (o *ApplicationLoadBalancerForwardingRulePut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *ApplicationLoadBalancerForwardingRulePut) SetId(v string) { +// SetHref sets field value +func (o *ApplicationLoadBalancerForwardingRulePut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRulePut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRulePut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *ApplicationLoadBalancerForwardingRulePut) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRulePut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRulePut) GetTypeOk() (*Type, bool) { +func (o *ApplicationLoadBalancerForwardingRulePut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *ApplicationLoadBalancerForwardingRulePut) SetType(v Type) { +// SetId sets field value +func (o *ApplicationLoadBalancerForwardingRulePut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRulePut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRulePut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerForwardingRulePut) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRulePut) GetProperties() *ApplicationLoadBalancerForwardingRuleProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRulePut) GetHrefOk() (*string, bool) { +func (o *ApplicationLoadBalancerForwardingRulePut) GetPropertiesOk() (*ApplicationLoadBalancerForwardingRuleProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *ApplicationLoadBalancerForwardingRulePut) SetHref(v string) { +// SetProperties sets field value +func (o *ApplicationLoadBalancerForwardingRulePut) SetProperties(v ApplicationLoadBalancerForwardingRuleProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRulePut) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRulePut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for ApplicationLoadBalancerForwardingRuleProperties will be returned -func (o *ApplicationLoadBalancerForwardingRulePut) GetProperties() *ApplicationLoadBalancerForwardingRuleProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRulePut) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRulePut) GetPropertiesOk() (*ApplicationLoadBalancerForwardingRuleProperties, bool) { +func (o *ApplicationLoadBalancerForwardingRulePut) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *ApplicationLoadBalancerForwardingRulePut) SetProperties(v ApplicationLoadBalancerForwardingRuleProperties) { +// SetType sets field value +func (o *ApplicationLoadBalancerForwardingRulePut) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRulePut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerForwardingRulePut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *ApplicationLoadBalancerForwardingRulePut) HasProperties() bool { func (o ApplicationLoadBalancerForwardingRulePut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rules.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rules.go index 14b1f86e5..aae71997f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rules.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rules.go @@ -16,19 +16,19 @@ import ( // ApplicationLoadBalancerForwardingRules struct for ApplicationLoadBalancerForwardingRules type ApplicationLoadBalancerForwardingRules struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]ApplicationLoadBalancerForwardingRule `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewApplicationLoadBalancerForwardingRules instantiates a new ApplicationLoadBalancerForwardingRules object @@ -49,114 +49,114 @@ func NewApplicationLoadBalancerForwardingRulesWithDefaults() *ApplicationLoadBal return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRules) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetIdOk() (*string, bool) { +func (o *ApplicationLoadBalancerForwardingRules) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *ApplicationLoadBalancerForwardingRules) SetId(v string) { +// SetLinks sets field value +func (o *ApplicationLoadBalancerForwardingRules) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRules) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRules) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRules) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetTypeOk() (*Type, bool) { +func (o *ApplicationLoadBalancerForwardingRules) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *ApplicationLoadBalancerForwardingRules) SetType(v Type) { +// SetHref sets field value +func (o *ApplicationLoadBalancerForwardingRules) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRules) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRules) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRules) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetHrefOk() (*string, bool) { +func (o *ApplicationLoadBalancerForwardingRules) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *ApplicationLoadBalancerForwardingRules) SetHref(v string) { +// SetId sets field value +func (o *ApplicationLoadBalancerForwardingRules) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *ApplicationLoadBalancerForwardingRules) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRules) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *ApplicationLoadBalancerForwardingRules) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []ApplicationLoadBalancerForwardingRule will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerForwardingRules) GetItems() *[]ApplicationLoadBalancerForwardingRule { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *ApplicationLoadBalancerForwardingRules) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRules) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetOffsetOk() (*float32, bool) { +func (o *ApplicationLoadBalancerForwardingRules) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *ApplicationLoadBalancerForwardingRules) SetOffset(v float32) { +// SetLimit sets field value +func (o *ApplicationLoadBalancerForwardingRules) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRules) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRules) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRules) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetLimitOk() (*float32, bool) { +func (o *ApplicationLoadBalancerForwardingRules) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *ApplicationLoadBalancerForwardingRules) SetLimit(v float32) { +// SetOffset sets field value +func (o *ApplicationLoadBalancerForwardingRules) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. 
-func (o *ApplicationLoadBalancerForwardingRules) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRules) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerForwardingRules) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerForwardingRules) GetLinksOk() (*PaginationLinks, bool) { +func (o *ApplicationLoadBalancerForwardingRules) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *ApplicationLoadBalancerForwardingRules) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *ApplicationLoadBalancerForwardingRules) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerForwardingRules) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerForwardingRules) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *ApplicationLoadBalancerForwardingRules) HasLinks() bool { func (o ApplicationLoadBalancerForwardingRules) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule.go index 9c117202e..1d52893f5 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule.go @@ -16,24 +16,24 @@ import ( // ApplicationLoadBalancerHttpRule struct for ApplicationLoadBalancerHttpRule type ApplicationLoadBalancerHttpRule struct { - // The unique name of the Application Load Balancer HTTP rule. - Name *string `json:"name"` - // The HTTP rule type. - Type *string `json:"type"` - // The ID of the target group; this parameter is mandatory and is valid only for 'FORWARD' actions. - TargetGroup *string `json:"targetGroup,omitempty"` + // An array of items in the collection. 
The action will be executed only if each condition is met; the rule will always be applied if no conditions are set. + Conditions *[]ApplicationLoadBalancerHttpRuleCondition `json:"conditions,omitempty"` + // Specifies the content type and is valid only for 'STATIC' actions. + ContentType *string `json:"contentType,omitempty"` // Indicates whether the query part of the URI should be dropped and is valid only for 'REDIRECT' actions. Default value is 'FALSE', the redirect URI does not contain any query parameters. DropQuery *bool `json:"dropQuery,omitempty"` // The location for the redirection; this parameter is mandatory and valid only for 'REDIRECT' actions. Location *string `json:"location,omitempty"` - // The status code is for 'REDIRECT' and 'STATIC' actions only. If the HTTP rule is 'REDIRECT' the valid values are: 301, 302, 303, 307, 308; default value is '301'. If the HTTP rule is 'STATIC' the valid values are from the range 200-599; default value is '503'. - StatusCode *int32 `json:"statusCode,omitempty"` + // The unique name of the Application Load Balancer HTTP rule. + Name *string `json:"name"` // The response message of the request; this parameter is mandatory for 'STATIC' actions. ResponseMessage *string `json:"responseMessage,omitempty"` - // Specifies the content type and is valid only for 'STATIC' actions. - ContentType *string `json:"contentType,omitempty"` - // An array of items in the collection. The action will be executed only if each condition is met; the rule will always be applied if no conditions are set. - Conditions *[]ApplicationLoadBalancerHttpRuleCondition `json:"conditions,omitempty"` + // The status code is for 'REDIRECT' and 'STATIC' actions only. If the HTTP rule is 'REDIRECT' the valid values are: 301, 302, 303, 307, 308; default value is '301'. If the HTTP rule is 'STATIC' the valid values are from the range 200-599; default value is '503'. + StatusCode *int32 `json:"statusCode,omitempty"` + // The ID of the target group; this parameter is mandatory and is valid only for 'FORWARD' actions. + TargetGroup *string `json:"targetGroup,omitempty"` + // The HTTP rule type. + Type *string `json:"type"` } // NewApplicationLoadBalancerHttpRule instantiates a new ApplicationLoadBalancerHttpRule object @@ -57,114 +57,76 @@ func NewApplicationLoadBalancerHttpRuleWithDefaults() *ApplicationLoadBalancerHt return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerHttpRule) GetName() *string { - if o == nil { - return nil - } - - return o.Name - -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerHttpRule) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Name, true -} - -// SetName sets field value -func (o *ApplicationLoadBalancerHttpRule) SetName(v string) { - - o.Name = &v - -} - -// HasName returns a boolean if a field has been set. 
-func (o *ApplicationLoadBalancerHttpRule) HasName() bool { - if o != nil && o.Name != nil { - return true - } - - return false -} - -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerHttpRule) GetType() *string { +// GetConditions returns the Conditions field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerHttpRule) GetConditions() *[]ApplicationLoadBalancerHttpRuleCondition { if o == nil { return nil } - return o.Type + return o.Conditions } -// GetTypeOk returns a tuple with the Type field value +// GetConditionsOk returns a tuple with the Conditions field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerHttpRule) GetTypeOk() (*string, bool) { +func (o *ApplicationLoadBalancerHttpRule) GetConditionsOk() (*[]ApplicationLoadBalancerHttpRuleCondition, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Conditions, true } -// SetType sets field value -func (o *ApplicationLoadBalancerHttpRule) SetType(v string) { +// SetConditions sets field value +func (o *ApplicationLoadBalancerHttpRule) SetConditions(v []ApplicationLoadBalancerHttpRuleCondition) { - o.Type = &v + o.Conditions = &v } -// HasType returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerHttpRule) HasType() bool { - if o != nil && o.Type != nil { +// HasConditions returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerHttpRule) HasConditions() bool { + if o != nil && o.Conditions != nil { return true } return false } -// GetTargetGroup returns the TargetGroup field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerHttpRule) GetTargetGroup() *string { +// GetContentType returns the ContentType field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerHttpRule) GetContentType() *string { if o == nil { return nil } - return o.TargetGroup + return o.ContentType } -// GetTargetGroupOk returns a tuple with the TargetGroup field value +// GetContentTypeOk returns a tuple with the ContentType field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerHttpRule) GetTargetGroupOk() (*string, bool) { +func (o *ApplicationLoadBalancerHttpRule) GetContentTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.TargetGroup, true + return o.ContentType, true } -// SetTargetGroup sets field value -func (o *ApplicationLoadBalancerHttpRule) SetTargetGroup(v string) { +// SetContentType sets field value +func (o *ApplicationLoadBalancerHttpRule) SetContentType(v string) { - o.TargetGroup = &v + o.ContentType = &v } -// HasTargetGroup returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerHttpRule) HasTargetGroup() bool { - if o != nil && o.TargetGroup != nil { +// HasContentType returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerHttpRule) HasContentType() bool { + if o != nil && o.ContentType != nil { return true } @@ -172,7 +134,7 @@ func (o *ApplicationLoadBalancerHttpRule) HasTargetGroup() bool { } // GetDropQuery returns the DropQuery field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerHttpRule) GetDropQuery() *bool { if o == nil { return nil @@ -210,7 +172,7 @@ func (o *ApplicationLoadBalancerHttpRule) HasDropQuery() bool { } // GetLocation returns the Location field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerHttpRule) GetLocation() *string { if o == nil { return nil @@ -247,38 +209,38 @@ func (o *ApplicationLoadBalancerHttpRule) HasLocation() bool { return false } -// GetStatusCode returns the StatusCode field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ApplicationLoadBalancerHttpRule) GetStatusCode() *int32 { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerHttpRule) GetName() *string { if o == nil { return nil } - return o.StatusCode + return o.Name } -// GetStatusCodeOk returns a tuple with the StatusCode field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerHttpRule) GetStatusCodeOk() (*int32, bool) { +func (o *ApplicationLoadBalancerHttpRule) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.StatusCode, true + return o.Name, true } -// SetStatusCode sets field value -func (o *ApplicationLoadBalancerHttpRule) SetStatusCode(v int32) { +// SetName sets field value +func (o *ApplicationLoadBalancerHttpRule) SetName(v string) { - o.StatusCode = &v + o.Name = &v } -// HasStatusCode returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerHttpRule) HasStatusCode() bool { - if o != nil && o.StatusCode != nil { +// HasName returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerHttpRule) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -286,7 +248,7 @@ func (o *ApplicationLoadBalancerHttpRule) HasStatusCode() bool { } // GetResponseMessage returns the ResponseMessage field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerHttpRule) GetResponseMessage() *string { if o == nil { return nil @@ -323,76 +285,114 @@ func (o *ApplicationLoadBalancerHttpRule) HasResponseMessage() bool { return false } -// GetContentType returns the ContentType field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerHttpRule) GetContentType() *string { +// GetStatusCode returns the StatusCode field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerHttpRule) GetStatusCode() *int32 { if o == nil { return nil } - return o.ContentType + return o.StatusCode } -// GetContentTypeOk returns a tuple with the ContentType field value +// GetStatusCodeOk returns a tuple with the StatusCode field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerHttpRule) GetContentTypeOk() (*string, bool) { +func (o *ApplicationLoadBalancerHttpRule) GetStatusCodeOk() (*int32, bool) { if o == nil { return nil, false } - return o.ContentType, true + return o.StatusCode, true } -// SetContentType sets field value -func (o *ApplicationLoadBalancerHttpRule) SetContentType(v string) { +// SetStatusCode sets field value +func (o *ApplicationLoadBalancerHttpRule) SetStatusCode(v int32) { - o.ContentType = &v + o.StatusCode = &v } -// HasContentType returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerHttpRule) HasContentType() bool { - if o != nil && o.ContentType != nil { +// HasStatusCode returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerHttpRule) HasStatusCode() bool { + if o != nil && o.StatusCode != nil { return true } return false } -// GetConditions returns the Conditions field value -// If the value is explicit nil, the zero value for []ApplicationLoadBalancerHttpRuleCondition will be returned -func (o *ApplicationLoadBalancerHttpRule) GetConditions() *[]ApplicationLoadBalancerHttpRuleCondition { +// GetTargetGroup returns the TargetGroup field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerHttpRule) GetTargetGroup() *string { if o == nil { return nil } - return o.Conditions + return o.TargetGroup } -// GetConditionsOk returns a tuple with the Conditions field value +// GetTargetGroupOk returns a tuple with the TargetGroup field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerHttpRule) GetConditionsOk() (*[]ApplicationLoadBalancerHttpRuleCondition, bool) { +func (o *ApplicationLoadBalancerHttpRule) GetTargetGroupOk() (*string, bool) { if o == nil { return nil, false } - return o.Conditions, true + return o.TargetGroup, true } -// SetConditions sets field value -func (o *ApplicationLoadBalancerHttpRule) SetConditions(v []ApplicationLoadBalancerHttpRuleCondition) { +// SetTargetGroup sets field value +func (o *ApplicationLoadBalancerHttpRule) SetTargetGroup(v string) { - o.Conditions = &v + o.TargetGroup = &v } -// HasConditions returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerHttpRule) HasConditions() bool { - if o != nil && o.Conditions != nil { +// HasTargetGroup returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerHttpRule) HasTargetGroup() bool { + if o != nil && o.TargetGroup != nil { + return true + } + + return false +} + +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerHttpRule) GetType() *string { + if o == nil { + return nil + } + + return o.Type + +} + +// GetTypeOk returns a tuple with the Type field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *ApplicationLoadBalancerHttpRule) GetTypeOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Type, true +} + +// SetType sets field value +func (o *ApplicationLoadBalancerHttpRule) SetType(v string) { + + o.Type = &v + +} + +// HasType returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerHttpRule) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -401,33 +401,42 @@ func (o *ApplicationLoadBalancerHttpRule) HasConditions() bool { func (o ApplicationLoadBalancerHttpRule) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Conditions != nil { + toSerialize["conditions"] = o.Conditions } - if o.TargetGroup != nil { - toSerialize["targetGroup"] = o.TargetGroup + + if o.ContentType != nil { + toSerialize["contentType"] = o.ContentType } + if o.DropQuery != nil { toSerialize["dropQuery"] = o.DropQuery } + if o.Location != nil { toSerialize["location"] = o.Location } - if o.StatusCode != nil { - toSerialize["statusCode"] = o.StatusCode + + if o.Name != nil { + toSerialize["name"] = o.Name } + if o.ResponseMessage != nil { toSerialize["responseMessage"] = o.ResponseMessage } - if o.ContentType != nil { - toSerialize["contentType"] = o.ContentType + + if o.StatusCode != nil { + toSerialize["statusCode"] = o.StatusCode } - if o.Conditions != nil { - toSerialize["conditions"] = o.Conditions + + if o.TargetGroup != nil { + toSerialize["targetGroup"] = o.TargetGroup } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule_condition.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule_condition.go index a61e46532..a2b12adfd 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule_condition.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule_condition.go @@ -16,14 +16,14 @@ import ( // ApplicationLoadBalancerHttpRuleCondition struct for ApplicationLoadBalancerHttpRuleCondition type ApplicationLoadBalancerHttpRuleCondition struct { - // The HTTP rule condition type. - Type *string `json:"type"` // The matching rule for the HTTP rule condition attribute; this parameter is mandatory for 'HEADER', 'PATH', 'QUERY', 'METHOD', 'HOST', and 'COOKIE' types. It must be 'null' if the type is 'SOURCE_IP'. Condition *string `json:"condition"` - // Specifies whether the condition should be negated; the default value is 'FALSE'. - Negate *bool `json:"negate,omitempty"` // The key can only be set when the HTTP rule condition type is 'COOKIES', 'HEADER', or 'QUERY'. For the type 'PATH', 'METHOD', 'HOST', or 'SOURCE_IP' the value must be 'null'. Key *string `json:"key,omitempty"` + // Specifies whether the condition should be negated; the default value is 'FALSE'. + Negate *bool `json:"negate,omitempty"` + // The HTTP rule condition type. + Type *string `json:"type"` // This parameter is mandatory for the conditions 'CONTAINS', 'EQUALS', 'MATCHES', 'STARTS_WITH', 'ENDS_WITH', or if the type is 'SOURCE_IP'. Specify a valid CIDR. If the condition is 'EXISTS', the value must be 'null'. 
Value *string `json:"value,omitempty"` } @@ -32,11 +32,11 @@ type ApplicationLoadBalancerHttpRuleCondition struct { // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewApplicationLoadBalancerHttpRuleCondition(type_ string, condition string) *ApplicationLoadBalancerHttpRuleCondition { +func NewApplicationLoadBalancerHttpRuleCondition(condition string, type_ string) *ApplicationLoadBalancerHttpRuleCondition { this := ApplicationLoadBalancerHttpRuleCondition{} - this.Type = &type_ this.Condition = &condition + this.Type = &type_ return &this } @@ -49,76 +49,76 @@ func NewApplicationLoadBalancerHttpRuleConditionWithDefaults() *ApplicationLoadB return &this } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerHttpRuleCondition) GetType() *string { +// GetCondition returns the Condition field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerHttpRuleCondition) GetCondition() *string { if o == nil { return nil } - return o.Type + return o.Condition } -// GetTypeOk returns a tuple with the Type field value +// GetConditionOk returns a tuple with the Condition field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerHttpRuleCondition) GetTypeOk() (*string, bool) { +func (o *ApplicationLoadBalancerHttpRuleCondition) GetConditionOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Condition, true } -// SetType sets field value -func (o *ApplicationLoadBalancerHttpRuleCondition) SetType(v string) { +// SetCondition sets field value +func (o *ApplicationLoadBalancerHttpRuleCondition) SetCondition(v string) { - o.Type = &v + o.Condition = &v } -// HasType returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerHttpRuleCondition) HasType() bool { - if o != nil && o.Type != nil { +// HasCondition returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerHttpRuleCondition) HasCondition() bool { + if o != nil && o.Condition != nil { return true } return false } -// GetCondition returns the Condition field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerHttpRuleCondition) GetCondition() *string { +// GetKey returns the Key field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerHttpRuleCondition) GetKey() *string { if o == nil { return nil } - return o.Condition + return o.Key } -// GetConditionOk returns a tuple with the Condition field value +// GetKeyOk returns a tuple with the Key field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerHttpRuleCondition) GetConditionOk() (*string, bool) { +func (o *ApplicationLoadBalancerHttpRuleCondition) GetKeyOk() (*string, bool) { if o == nil { return nil, false } - return o.Condition, true + return o.Key, true } -// SetCondition sets field value -func (o *ApplicationLoadBalancerHttpRuleCondition) SetCondition(v string) { +// SetKey sets field value +func (o *ApplicationLoadBalancerHttpRuleCondition) SetKey(v string) { - o.Condition = &v + o.Key = &v } -// HasCondition returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerHttpRuleCondition) HasCondition() bool { - if o != nil && o.Condition != nil { +// HasKey returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerHttpRuleCondition) HasKey() bool { + if o != nil && o.Key != nil { return true } @@ -126,7 +126,7 @@ func (o *ApplicationLoadBalancerHttpRuleCondition) HasCondition() bool { } // GetNegate returns the Negate field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerHttpRuleCondition) GetNegate() *bool { if o == nil { return nil @@ -163,38 +163,38 @@ func (o *ApplicationLoadBalancerHttpRuleCondition) HasNegate() bool { return false } -// GetKey returns the Key field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerHttpRuleCondition) GetKey() *string { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerHttpRuleCondition) GetType() *string { if o == nil { return nil } - return o.Key + return o.Type } -// GetKeyOk returns a tuple with the Key field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerHttpRuleCondition) GetKeyOk() (*string, bool) { +func (o *ApplicationLoadBalancerHttpRuleCondition) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Key, true + return o.Type, true } -// SetKey sets field value -func (o *ApplicationLoadBalancerHttpRuleCondition) SetKey(v string) { +// SetType sets field value +func (o *ApplicationLoadBalancerHttpRuleCondition) SetType(v string) { - o.Key = &v + o.Type = &v } -// HasKey returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerHttpRuleCondition) HasKey() bool { - if o != nil && o.Key != nil { +// HasType returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerHttpRuleCondition) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -202,7 +202,7 @@ func (o *ApplicationLoadBalancerHttpRuleCondition) HasKey() bool { } // GetValue returns the Value field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerHttpRuleCondition) GetValue() *string { if o == nil { return nil @@ -241,21 +241,26 @@ func (o *ApplicationLoadBalancerHttpRuleCondition) HasValue() bool { func (o ApplicationLoadBalancerHttpRuleCondition) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Condition != nil { toSerialize["condition"] = o.Condition } + + if o.Key != nil { + toSerialize["key"] = o.Key + } + if o.Negate != nil { toSerialize["negate"] = o.Negate } - if o.Key != nil { - toSerialize["key"] = o.Key + + if o.Type != nil { + toSerialize["type"] = o.Type } + if o.Value != nil { toSerialize["value"] = o.Value } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_properties.go index 5055570bd..8c62ecc10 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_properties.go @@ -16,27 +16,27 @@ import ( // ApplicationLoadBalancerProperties struct for ApplicationLoadBalancerProperties type ApplicationLoadBalancerProperties struct { - // The Application Load Balancer name. - Name *string `json:"name"` - // The ID of the listening (inbound) LAN. - ListenerLan *int32 `json:"listenerLan"` // Collection of the Application Load Balancer IP addresses. (Inbound and outbound) IPs of the 'listenerLan' are customer-reserved public IPs for the public load balancers, and private IPs for the private load balancers. Ips *[]string `json:"ips,omitempty"` - // The ID of the balanced private target LAN (outbound). - TargetLan *int32 `json:"targetLan"` // Collection of private IP addresses with the subnet mask of the Application Load Balancer. IPs must contain valid a subnet mask. If no IP is provided, the system will generate an IP with /24 subnet. LbPrivateIps *[]string `json:"lbPrivateIps,omitempty"` + // The ID of the listening (inbound) LAN. + ListenerLan *int32 `json:"listenerLan"` + // The Application Load Balancer name. + Name *string `json:"name"` + // The ID of the balanced private target LAN (outbound). 
+ TargetLan *int32 `json:"targetLan"` } // NewApplicationLoadBalancerProperties instantiates a new ApplicationLoadBalancerProperties object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewApplicationLoadBalancerProperties(name string, listenerLan int32, targetLan int32) *ApplicationLoadBalancerProperties { +func NewApplicationLoadBalancerProperties(listenerLan int32, name string, targetLan int32) *ApplicationLoadBalancerProperties { this := ApplicationLoadBalancerProperties{} - this.Name = &name this.ListenerLan = &listenerLan + this.Name = &name this.TargetLan = &targetLan return &this @@ -50,38 +50,76 @@ func NewApplicationLoadBalancerPropertiesWithDefaults() *ApplicationLoadBalancer return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerProperties) GetName() *string { +// GetIps returns the Ips field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerProperties) GetIps() *[]string { if o == nil { return nil } - return o.Name + return o.Ips } -// GetNameOk returns a tuple with the Name field value +// GetIpsOk returns a tuple with the Ips field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerProperties) GetNameOk() (*string, bool) { +func (o *ApplicationLoadBalancerProperties) GetIpsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Ips, true } -// SetName sets field value -func (o *ApplicationLoadBalancerProperties) SetName(v string) { +// SetIps sets field value +func (o *ApplicationLoadBalancerProperties) SetIps(v []string) { - o.Name = &v + o.Ips = &v } -// HasName returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasIps returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerProperties) HasIps() bool { + if o != nil && o.Ips != nil { + return true + } + + return false +} + +// GetLbPrivateIps returns the LbPrivateIps field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerProperties) GetLbPrivateIps() *[]string { + if o == nil { + return nil + } + + return o.LbPrivateIps + +} + +// GetLbPrivateIpsOk returns a tuple with the LbPrivateIps field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *ApplicationLoadBalancerProperties) GetLbPrivateIpsOk() (*[]string, bool) { + if o == nil { + return nil, false + } + + return o.LbPrivateIps, true +} + +// SetLbPrivateIps sets field value +func (o *ApplicationLoadBalancerProperties) SetLbPrivateIps(v []string) { + + o.LbPrivateIps = &v + +} + +// HasLbPrivateIps returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerProperties) HasLbPrivateIps() bool { + if o != nil && o.LbPrivateIps != nil { return true } @@ -89,7 +127,7 @@ func (o *ApplicationLoadBalancerProperties) HasName() bool { } // GetListenerLan returns the ListenerLan field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerProperties) GetListenerLan() *int32 { if o == nil { return nil @@ -126,38 +164,38 @@ func (o *ApplicationLoadBalancerProperties) HasListenerLan() bool { return false } -// GetIps returns the Ips field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *ApplicationLoadBalancerProperties) GetIps() *[]string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerProperties) GetName() *string { if o == nil { return nil } - return o.Ips + return o.Name } -// GetIpsOk returns a tuple with the Ips field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerProperties) GetIpsOk() (*[]string, bool) { +func (o *ApplicationLoadBalancerProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Ips, true + return o.Name, true } -// SetIps sets field value -func (o *ApplicationLoadBalancerProperties) SetIps(v []string) { +// SetName sets field value +func (o *ApplicationLoadBalancerProperties) SetName(v string) { - o.Ips = &v + o.Name = &v } -// HasIps returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerProperties) HasIps() bool { - if o != nil && o.Ips != nil { +// HasName returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -165,7 +203,7 @@ func (o *ApplicationLoadBalancerProperties) HasIps() bool { } // GetTargetLan returns the TargetLan field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancerProperties) GetTargetLan() *int32 { if o == nil { return nil @@ -202,61 +240,28 @@ func (o *ApplicationLoadBalancerProperties) HasTargetLan() bool { return false } -// GetLbPrivateIps returns the LbPrivateIps field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *ApplicationLoadBalancerProperties) GetLbPrivateIps() *[]string { - if o == nil { - return nil +func (o ApplicationLoadBalancerProperties) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if o.Ips != nil { + toSerialize["ips"] = o.Ips } - return o.LbPrivateIps - -} - -// GetLbPrivateIpsOk returns a tuple with the LbPrivateIps field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerProperties) GetLbPrivateIpsOk() (*[]string, bool) { - if o == nil { - return nil, false + if o.LbPrivateIps != nil { + toSerialize["lbPrivateIps"] = o.LbPrivateIps } - return o.LbPrivateIps, true -} - -// SetLbPrivateIps sets field value -func (o *ApplicationLoadBalancerProperties) SetLbPrivateIps(v []string) { - - o.LbPrivateIps = &v - -} - -// HasLbPrivateIps returns a boolean if a field has been set. 
-func (o *ApplicationLoadBalancerProperties) HasLbPrivateIps() bool { - if o != nil && o.LbPrivateIps != nil { - return true + if o.ListenerLan != nil { + toSerialize["listenerLan"] = o.ListenerLan } - return false -} - -func (o ApplicationLoadBalancerProperties) MarshalJSON() ([]byte, error) { - toSerialize := map[string]interface{}{} if o.Name != nil { toSerialize["name"] = o.Name } - if o.ListenerLan != nil { - toSerialize["listenerLan"] = o.ListenerLan - } - if o.Ips != nil { - toSerialize["ips"] = o.Ips - } + if o.TargetLan != nil { toSerialize["targetLan"] = o.TargetLan } - if o.LbPrivateIps != nil { - toSerialize["lbPrivateIps"] = o.LbPrivateIps - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_put.go index ce188b2d8..79a5fa1c7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_put.go @@ -16,13 +16,13 @@ import ( // ApplicationLoadBalancerPut struct for ApplicationLoadBalancerPut type ApplicationLoadBalancerPut struct { + // The URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *ApplicationLoadBalancerProperties `json:"properties"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` - Properties *ApplicationLoadBalancerProperties `json:"properties"` } // NewApplicationLoadBalancerPut instantiates a new ApplicationLoadBalancerPut object @@ -45,152 +45,152 @@ func NewApplicationLoadBalancerPutWithDefaults() *ApplicationLoadBalancerPut { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerPut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerPut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerPut) GetIdOk() (*string, bool) { +func (o *ApplicationLoadBalancerPut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *ApplicationLoadBalancerPut) SetId(v string) { +// SetHref sets field value +func (o *ApplicationLoadBalancerPut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerPut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerPut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *ApplicationLoadBalancerPut) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerPut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerPut) GetTypeOk() (*Type, bool) { +func (o *ApplicationLoadBalancerPut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *ApplicationLoadBalancerPut) SetType(v Type) { +// SetId sets field value +func (o *ApplicationLoadBalancerPut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerPut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerPut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancerPut) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerPut) GetProperties() *ApplicationLoadBalancerProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerPut) GetHrefOk() (*string, bool) { +func (o *ApplicationLoadBalancerPut) GetPropertiesOk() (*ApplicationLoadBalancerProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *ApplicationLoadBalancerPut) SetHref(v string) { +// SetProperties sets field value +func (o *ApplicationLoadBalancerPut) SetProperties(v ApplicationLoadBalancerProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerPut) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. 
+func (o *ApplicationLoadBalancerPut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for ApplicationLoadBalancerProperties will be returned -func (o *ApplicationLoadBalancerPut) GetProperties() *ApplicationLoadBalancerProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancerPut) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancerPut) GetPropertiesOk() (*ApplicationLoadBalancerProperties, bool) { +func (o *ApplicationLoadBalancerPut) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *ApplicationLoadBalancerPut) SetProperties(v ApplicationLoadBalancerProperties) { +// SetType sets field value +func (o *ApplicationLoadBalancerPut) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *ApplicationLoadBalancerPut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *ApplicationLoadBalancerPut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *ApplicationLoadBalancerPut) HasProperties() bool { func (o ApplicationLoadBalancerPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancers.go index a3eff3d6a..a2f3a9e04 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancers.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancers.go @@ -16,19 +16,19 @@ import ( // ApplicationLoadBalancers struct for ApplicationLoadBalancers type ApplicationLoadBalancers struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]ApplicationLoadBalancer `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewApplicationLoadBalancers instantiates a new ApplicationLoadBalancers object @@ -49,114 +49,114 @@ func NewApplicationLoadBalancersWithDefaults() *ApplicationLoadBalancers { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancers) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancers) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancers) GetIdOk() (*string, bool) { +func (o *ApplicationLoadBalancers) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *ApplicationLoadBalancers) SetId(v string) { +// SetLinks sets field value +func (o *ApplicationLoadBalancers) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *ApplicationLoadBalancers) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *ApplicationLoadBalancers) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *ApplicationLoadBalancers) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancers) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancers) GetTypeOk() (*Type, bool) { +func (o *ApplicationLoadBalancers) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *ApplicationLoadBalancers) SetType(v Type) { +// SetHref sets field value +func (o *ApplicationLoadBalancers) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *ApplicationLoadBalancers) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *ApplicationLoadBalancers) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ApplicationLoadBalancers) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancers) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancers) GetHrefOk() (*string, bool) { +func (o *ApplicationLoadBalancers) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *ApplicationLoadBalancers) SetHref(v string) { +// SetId sets field value +func (o *ApplicationLoadBalancers) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *ApplicationLoadBalancers) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *ApplicationLoadBalancers) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *ApplicationLoadBalancers) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []ApplicationLoadBalancer will be returned +// If the value is explicit nil, nil is returned func (o *ApplicationLoadBalancers) GetItems() *[]ApplicationLoadBalancer { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *ApplicationLoadBalancers) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *ApplicationLoadBalancers) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancers) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancers) GetOffsetOk() (*float32, bool) { +func (o *ApplicationLoadBalancers) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *ApplicationLoadBalancers) SetOffset(v float32) { +// SetLimit sets field value +func (o *ApplicationLoadBalancers) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *ApplicationLoadBalancers) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *ApplicationLoadBalancers) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *ApplicationLoadBalancers) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancers) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancers) GetLimitOk() (*float32, bool) { +func (o *ApplicationLoadBalancers) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *ApplicationLoadBalancers) SetLimit(v float32) { +// SetOffset sets field value +func (o *ApplicationLoadBalancers) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *ApplicationLoadBalancers) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *ApplicationLoadBalancers) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *ApplicationLoadBalancers) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ApplicationLoadBalancers) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ApplicationLoadBalancers) GetLinksOk() (*PaginationLinks, bool) { +func (o *ApplicationLoadBalancers) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *ApplicationLoadBalancers) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *ApplicationLoadBalancers) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *ApplicationLoadBalancers) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *ApplicationLoadBalancers) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *ApplicationLoadBalancers) HasLinks() bool { func (o ApplicationLoadBalancers) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_attached_volumes.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_attached_volumes.go index 3791f488e..fdd81e425 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_attached_volumes.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_attached_volumes.go @@ -16,19 +16,19 @@ import ( // AttachedVolumes struct for AttachedVolumes type AttachedVolumes struct { - // The resource's unique identifier. 
- Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Volume `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewAttachedVolumes instantiates a new AttachedVolumes object @@ -49,114 +49,114 @@ func NewAttachedVolumesWithDefaults() *AttachedVolumes { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *AttachedVolumes) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *AttachedVolumes) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *AttachedVolumes) GetIdOk() (*string, bool) { +func (o *AttachedVolumes) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *AttachedVolumes) SetId(v string) { +// SetLinks sets field value +func (o *AttachedVolumes) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *AttachedVolumes) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *AttachedVolumes) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *AttachedVolumes) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *AttachedVolumes) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *AttachedVolumes) GetTypeOk() (*Type, bool) { +func (o *AttachedVolumes) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *AttachedVolumes) SetType(v Type) { +// SetHref sets field value +func (o *AttachedVolumes) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *AttachedVolumes) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *AttachedVolumes) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *AttachedVolumes) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *AttachedVolumes) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *AttachedVolumes) GetHrefOk() (*string, bool) { +func (o *AttachedVolumes) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *AttachedVolumes) SetHref(v string) { +// SetId sets field value +func (o *AttachedVolumes) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *AttachedVolumes) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *AttachedVolumes) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *AttachedVolumes) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Volume will be returned +// If the value is explicit nil, nil is returned func (o *AttachedVolumes) GetItems() *[]Volume { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *AttachedVolumes) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *AttachedVolumes) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *AttachedVolumes) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *AttachedVolumes) GetOffsetOk() (*float32, bool) { +func (o *AttachedVolumes) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *AttachedVolumes) SetOffset(v float32) { +// SetLimit sets field value +func (o *AttachedVolumes) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *AttachedVolumes) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. 
+func (o *AttachedVolumes) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *AttachedVolumes) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *AttachedVolumes) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *AttachedVolumes) GetLimitOk() (*float32, bool) { +func (o *AttachedVolumes) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *AttachedVolumes) SetLimit(v float32) { +// SetOffset sets field value +func (o *AttachedVolumes) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *AttachedVolumes) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *AttachedVolumes) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *AttachedVolumes) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *AttachedVolumes) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *AttachedVolumes) GetLinksOk() (*PaginationLinks, bool) { +func (o *AttachedVolumes) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *AttachedVolumes) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *AttachedVolumes) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *AttachedVolumes) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. 
+func (o *AttachedVolumes) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *AttachedVolumes) HasLinks() bool { func (o AttachedVolumes) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit.go index bc9197db9..6f1ff7c47 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit.go @@ -16,14 +16,14 @@ import ( // BackupUnit struct for BackupUnit type BackupUnit struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *string `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *BackupUnitProperties `json:"properties"` + // The type of object that has been created. + Type *string `json:"type,omitempty"` } // NewBackupUnit instantiates a new BackupUnit object @@ -46,190 +46,190 @@ func NewBackupUnitWithDefaults() *BackupUnit { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BackupUnit) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *BackupUnit) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnit) GetIdOk() (*string, bool) { +func (o *BackupUnit) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *BackupUnit) SetId(v string) { +// SetHref sets field value +func (o *BackupUnit) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *BackupUnit) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *BackupUnit) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BackupUnit) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *BackupUnit) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnit) GetTypeOk() (*string, bool) { +func (o *BackupUnit) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *BackupUnit) SetType(v string) { +// SetId sets field value +func (o *BackupUnit) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *BackupUnit) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *BackupUnit) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BackupUnit) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *BackupUnit) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnit) GetHrefOk() (*string, bool) { +func (o *BackupUnit) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *BackupUnit) SetHref(v string) { +// SetMetadata sets field value +func (o *BackupUnit) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *BackupUnit) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *BackupUnit) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *BackupUnit) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *BackupUnit) GetProperties() *BackupUnitProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnit) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *BackupUnit) GetPropertiesOk() (*BackupUnitProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *BackupUnit) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *BackupUnit) SetProperties(v BackupUnitProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *BackupUnit) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *BackupUnit) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for BackupUnitProperties will be returned -func (o *BackupUnit) GetProperties() *BackupUnitProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *BackupUnit) GetType() *string { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnit) GetPropertiesOk() (*BackupUnitProperties, bool) { +func (o *BackupUnit) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *BackupUnit) SetProperties(v BackupUnitProperties) { +// SetType sets field value +func (o *BackupUnit) SetType(v string) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *BackupUnit) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *BackupUnit) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *BackupUnit) HasProperties() bool { func (o BackupUnit) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit_properties.go index 45b5584e2..64698d356 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit_properties.go @@ -16,12 +16,12 @@ import ( // BackupUnitProperties struct for BackupUnitProperties type BackupUnitProperties struct { + // The email associated with the backup unit. Bear in mind that this email does not be the same email as of the user. + Email *string `json:"email,omitempty"` // The name of the resource (alphanumeric characters only). 
Name *string `json:"name"` // The password associated with that resource. Password *string `json:"password,omitempty"` - // The email associated with the backup unit. Bear in mind that this email does not be the same email as of the user. - Email *string `json:"email,omitempty"` } // NewBackupUnitProperties instantiates a new BackupUnitProperties object @@ -44,114 +44,114 @@ func NewBackupUnitPropertiesWithDefaults() *BackupUnitProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BackupUnitProperties) GetName() *string { +// GetEmail returns the Email field value +// If the value is explicit nil, nil is returned +func (o *BackupUnitProperties) GetEmail() *string { if o == nil { return nil } - return o.Name + return o.Email } -// GetNameOk returns a tuple with the Name field value +// GetEmailOk returns a tuple with the Email field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnitProperties) GetNameOk() (*string, bool) { +func (o *BackupUnitProperties) GetEmailOk() (*string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Email, true } -// SetName sets field value -func (o *BackupUnitProperties) SetName(v string) { +// SetEmail sets field value +func (o *BackupUnitProperties) SetEmail(v string) { - o.Name = &v + o.Email = &v } -// HasName returns a boolean if a field has been set. -func (o *BackupUnitProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasEmail returns a boolean if a field has been set. +func (o *BackupUnitProperties) HasEmail() bool { + if o != nil && o.Email != nil { return true } return false } -// GetPassword returns the Password field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BackupUnitProperties) GetPassword() *string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *BackupUnitProperties) GetName() *string { if o == nil { return nil } - return o.Password + return o.Name } -// GetPasswordOk returns a tuple with the Password field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnitProperties) GetPasswordOk() (*string, bool) { +func (o *BackupUnitProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Password, true + return o.Name, true } -// SetPassword sets field value -func (o *BackupUnitProperties) SetPassword(v string) { +// SetName sets field value +func (o *BackupUnitProperties) SetName(v string) { - o.Password = &v + o.Name = &v } -// HasPassword returns a boolean if a field has been set. -func (o *BackupUnitProperties) HasPassword() bool { - if o != nil && o.Password != nil { +// HasName returns a boolean if a field has been set. 
+func (o *BackupUnitProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetEmail returns the Email field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BackupUnitProperties) GetEmail() *string { +// GetPassword returns the Password field value +// If the value is explicit nil, nil is returned +func (o *BackupUnitProperties) GetPassword() *string { if o == nil { return nil } - return o.Email + return o.Password } -// GetEmailOk returns a tuple with the Email field value +// GetPasswordOk returns a tuple with the Password field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnitProperties) GetEmailOk() (*string, bool) { +func (o *BackupUnitProperties) GetPasswordOk() (*string, bool) { if o == nil { return nil, false } - return o.Email, true + return o.Password, true } -// SetEmail sets field value -func (o *BackupUnitProperties) SetEmail(v string) { +// SetPassword sets field value +func (o *BackupUnitProperties) SetPassword(v string) { - o.Email = &v + o.Password = &v } -// HasEmail returns a boolean if a field has been set. -func (o *BackupUnitProperties) HasEmail() bool { - if o != nil && o.Email != nil { +// HasPassword returns a boolean if a field has been set. +func (o *BackupUnitProperties) HasPassword() bool { + if o != nil && o.Password != nil { return true } @@ -160,15 +160,18 @@ func (o *BackupUnitProperties) HasEmail() bool { func (o BackupUnitProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + if o.Email != nil { + toSerialize["email"] = o.Email + } + if o.Name != nil { toSerialize["name"] = o.Name } + if o.Password != nil { toSerialize["password"] = o.Password } - if o.Email != nil { - toSerialize["email"] = o.Email - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit_sso.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit_sso.go index f493bf950..ea6ba4159 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit_sso.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_unit_sso.go @@ -39,7 +39,7 @@ func NewBackupUnitSSOWithDefaults() *BackupUnitSSO { } // GetSsoUrl returns the SsoUrl field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *BackupUnitSSO) GetSsoUrl() *string { if o == nil { return nil @@ -81,6 +81,7 @@ func (o BackupUnitSSO) MarshalJSON() ([]byte, error) { if o.SsoUrl != nil { toSerialize["ssoUrl"] = o.SsoUrl } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_units.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_units.go index 6023f94e1..280d50c61 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_units.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_backup_units.go @@ -16,14 +16,14 @@ import ( // BackupUnits struct for BackupUnits type BackupUnits struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *string `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. 
Items *[]BackupUnit `json:"items,omitempty"` + // The type of object that has been created. + Type *string `json:"type,omitempty"` } // NewBackupUnits instantiates a new BackupUnits object @@ -44,152 +44,152 @@ func NewBackupUnitsWithDefaults() *BackupUnits { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BackupUnits) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *BackupUnits) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnits) GetIdOk() (*string, bool) { +func (o *BackupUnits) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *BackupUnits) SetId(v string) { +// SetHref sets field value +func (o *BackupUnits) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *BackupUnits) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *BackupUnits) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BackupUnits) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *BackupUnits) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnits) GetTypeOk() (*string, bool) { +func (o *BackupUnits) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *BackupUnits) SetType(v string) { +// SetId sets field value +func (o *BackupUnits) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *BackupUnits) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *BackupUnits) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BackupUnits) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *BackupUnits) GetItems() *[]BackupUnit { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnits) GetHrefOk() (*string, bool) { +func (o *BackupUnits) GetItemsOk() (*[]BackupUnit, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *BackupUnits) SetHref(v string) { +// SetItems sets field value +func (o *BackupUnits) SetItems(v []BackupUnit) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *BackupUnits) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *BackupUnits) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []BackupUnit will be returned -func (o *BackupUnits) GetItems() *[]BackupUnit { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *BackupUnits) GetType() *string { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BackupUnits) GetItemsOk() (*[]BackupUnit, bool) { +func (o *BackupUnits) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *BackupUnits) SetItems(v []BackupUnit) { +// SetType sets field value +func (o *BackupUnits) SetType(v string) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *BackupUnits) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *BackupUnits) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *BackupUnits) HasItems() bool { func (o BackupUnits) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_balanced_nics.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_balanced_nics.go index feef17f62..f9338b578 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_balanced_nics.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_balanced_nics.go @@ -16,19 +16,19 @@ import ( // BalancedNics struct for BalancedNics type BalancedNics struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Nic `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). 
Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewBalancedNics instantiates a new BalancedNics object @@ -49,114 +49,114 @@ func NewBalancedNicsWithDefaults() *BalancedNics { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BalancedNics) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *BalancedNics) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BalancedNics) GetIdOk() (*string, bool) { +func (o *BalancedNics) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *BalancedNics) SetId(v string) { +// SetLinks sets field value +func (o *BalancedNics) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *BalancedNics) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *BalancedNics) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *BalancedNics) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *BalancedNics) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BalancedNics) GetTypeOk() (*Type, bool) { +func (o *BalancedNics) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *BalancedNics) SetType(v Type) { +// SetHref sets field value +func (o *BalancedNics) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *BalancedNics) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *BalancedNics) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *BalancedNics) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *BalancedNics) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BalancedNics) GetHrefOk() (*string, bool) { +func (o *BalancedNics) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *BalancedNics) SetHref(v string) { +// SetId sets field value +func (o *BalancedNics) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *BalancedNics) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *BalancedNics) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *BalancedNics) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Nic will be returned +// If the value is explicit nil, nil is returned func (o *BalancedNics) GetItems() *[]Nic { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *BalancedNics) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *BalancedNics) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *BalancedNics) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BalancedNics) GetOffsetOk() (*float32, bool) { +func (o *BalancedNics) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *BalancedNics) SetOffset(v float32) { +// SetLimit sets field value +func (o *BalancedNics) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *BalancedNics) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *BalancedNics) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *BalancedNics) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *BalancedNics) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BalancedNics) GetLimitOk() (*float32, bool) { +func (o *BalancedNics) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *BalancedNics) SetLimit(v float32) { +// SetOffset sets field value +func (o *BalancedNics) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *BalancedNics) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. 
+func (o *BalancedNics) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *BalancedNics) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *BalancedNics) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *BalancedNics) GetLinksOk() (*PaginationLinks, bool) { +func (o *BalancedNics) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *BalancedNics) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *BalancedNics) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *BalancedNics) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *BalancedNics) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *BalancedNics) HasLinks() bool { func (o BalancedNics) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_cdroms.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_cdroms.go index 4d2bb9a85..86289fa1d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_cdroms.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_cdroms.go @@ -16,19 +16,19 @@ import ( // Cdroms struct for Cdroms type Cdroms struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Image `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewCdroms instantiates a new Cdroms object @@ -49,114 +49,114 @@ func NewCdromsWithDefaults() *Cdroms { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Cdroms) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *Cdroms) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Cdroms) GetIdOk() (*string, bool) { +func (o *Cdroms) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *Cdroms) SetId(v string) { +// SetLinks sets field value +func (o *Cdroms) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *Cdroms) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *Cdroms) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Cdroms) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Cdroms) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Cdroms) GetTypeOk() (*Type, bool) { +func (o *Cdroms) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Cdroms) SetType(v Type) { +// SetHref sets field value +func (o *Cdroms) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Cdroms) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Cdroms) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Cdroms) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Cdroms) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Cdroms) GetHrefOk() (*string, bool) { +func (o *Cdroms) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Cdroms) SetHref(v string) { +// SetId sets field value +func (o *Cdroms) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *Cdroms) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Cdroms) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *Cdroms) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Image will be returned +// If the value is explicit nil, nil is returned func (o *Cdroms) GetItems() *[]Image { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *Cdroms) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Cdroms) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *Cdroms) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Cdroms) GetOffsetOk() (*float32, bool) { +func (o *Cdroms) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *Cdroms) SetOffset(v float32) { +// SetLimit sets field value +func (o *Cdroms) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *Cdroms) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *Cdroms) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Cdroms) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *Cdroms) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Cdroms) GetLimitOk() (*float32, bool) { +func (o *Cdroms) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *Cdroms) SetLimit(v float32) { +// SetOffset sets field value +func (o *Cdroms) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *Cdroms) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. 
+func (o *Cdroms) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *Cdroms) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Cdroms) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Cdroms) GetLinksOk() (*PaginationLinks, bool) { +func (o *Cdroms) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *Cdroms) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *Cdroms) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *Cdroms) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *Cdroms) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *Cdroms) HasLinks() bool { func (o Cdroms) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_connectable_datacenter.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_connectable_datacenter.go index 6b8a5f4cd..778a5bdd0 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_connectable_datacenter.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_connectable_datacenter.go @@ -17,8 +17,8 @@ import ( // ConnectableDatacenter struct for ConnectableDatacenter type ConnectableDatacenter struct { Id *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` } // NewConnectableDatacenter instantiates a new ConnectableDatacenter object @@ -40,7 +40,7 @@ func NewConnectableDatacenterWithDefaults() *ConnectableDatacenter { } // GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ConnectableDatacenter) GetId() *string { if o == nil { return nil @@ -77,76 +77,76 @@ func (o *ConnectableDatacenter) HasId() bool { return false } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ConnectableDatacenter) GetName() *string { +// GetLocation returns the Location field value +// If the value is explicit nil, nil is returned +func (o *ConnectableDatacenter) GetLocation() 
*string { if o == nil { return nil } - return o.Name + return o.Location } -// GetNameOk returns a tuple with the Name field value +// GetLocationOk returns a tuple with the Location field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ConnectableDatacenter) GetNameOk() (*string, bool) { +func (o *ConnectableDatacenter) GetLocationOk() (*string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Location, true } -// SetName sets field value -func (o *ConnectableDatacenter) SetName(v string) { +// SetLocation sets field value +func (o *ConnectableDatacenter) SetLocation(v string) { - o.Name = &v + o.Location = &v } -// HasName returns a boolean if a field has been set. -func (o *ConnectableDatacenter) HasName() bool { - if o != nil && o.Name != nil { +// HasLocation returns a boolean if a field has been set. +func (o *ConnectableDatacenter) HasLocation() bool { + if o != nil && o.Location != nil { return true } return false } -// GetLocation returns the Location field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ConnectableDatacenter) GetLocation() *string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *ConnectableDatacenter) GetName() *string { if o == nil { return nil } - return o.Location + return o.Name } -// GetLocationOk returns a tuple with the Location field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ConnectableDatacenter) GetLocationOk() (*string, bool) { +func (o *ConnectableDatacenter) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Location, true + return o.Name, true } -// SetLocation sets field value -func (o *ConnectableDatacenter) SetLocation(v string) { +// SetName sets field value +func (o *ConnectableDatacenter) SetName(v string) { - o.Location = &v + o.Name = &v } -// HasLocation returns a boolean if a field has been set. -func (o *ConnectableDatacenter) HasLocation() bool { - if o != nil && o.Location != nil { +// HasName returns a boolean if a field has been set. +func (o *ConnectableDatacenter) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -158,12 +158,15 @@ func (o ConnectableDatacenter) MarshalJSON() ([]byte, error) { if o.Id != nil { toSerialize["id"] = o.Id } - if o.Name != nil { - toSerialize["name"] = o.Name - } + if o.Location != nil { toSerialize["location"] = o.Location } + + if o.Name != nil { + toSerialize["name"] = o.Name + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract.go index e1e05792e..d3ce413e1 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract.go @@ -16,9 +16,9 @@ import ( // Contract struct for Contract type Contract struct { - // The type of the resource. - Type *Type `json:"type,omitempty"` Properties *ContractProperties `json:"properties"` + // The type of the resource. 
+ Type *Type `json:"type,omitempty"` } // NewContract instantiates a new Contract object @@ -41,76 +41,76 @@ func NewContractWithDefaults() *Contract { return &this } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Contract) GetType() *Type { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *Contract) GetProperties() *ContractProperties { if o == nil { return nil } - return o.Type + return o.Properties } -// GetTypeOk returns a tuple with the Type field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Contract) GetTypeOk() (*Type, bool) { +func (o *Contract) GetPropertiesOk() (*ContractProperties, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Properties, true } -// SetType sets field value -func (o *Contract) SetType(v Type) { +// SetProperties sets field value +func (o *Contract) SetProperties(v ContractProperties) { - o.Type = &v + o.Properties = &v } -// HasType returns a boolean if a field has been set. -func (o *Contract) HasType() bool { - if o != nil && o.Type != nil { +// HasProperties returns a boolean if a field has been set. +func (o *Contract) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for ContractProperties will be returned -func (o *Contract) GetProperties() *ContractProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Contract) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Contract) GetPropertiesOk() (*ContractProperties, bool) { +func (o *Contract) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *Contract) SetProperties(v ContractProperties) { +// SetType sets field value +func (o *Contract) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *Contract) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Contract) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -119,12 +119,14 @@ func (o *Contract) HasProperties() bool { func (o Contract) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract_properties.go index 522f61829..a579ea3b7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract_properties.go @@ -20,11 +20,11 @@ type ContractProperties struct { ContractNumber *int64 `json:"contractNumber,omitempty"` // The contract owner's user name. Owner *string `json:"owner,omitempty"` - // The contract status. - Status *string `json:"status,omitempty"` // The registration domain of the contract. RegDomain *string `json:"regDomain,omitempty"` ResourceLimits *ResourceLimits `json:"resourceLimits,omitempty"` + // The contract status. + Status *string `json:"status,omitempty"` } // NewContractProperties instantiates a new ContractProperties object @@ -46,7 +46,7 @@ func NewContractPropertiesWithDefaults() *ContractProperties { } // GetContractNumber returns the ContractNumber field value -// If the value is explicit nil, the zero value for int64 will be returned +// If the value is explicit nil, nil is returned func (o *ContractProperties) GetContractNumber() *int64 { if o == nil { return nil @@ -84,7 +84,7 @@ func (o *ContractProperties) HasContractNumber() bool { } // GetOwner returns the Owner field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ContractProperties) GetOwner() *string { if o == nil { return nil @@ -121,114 +121,114 @@ func (o *ContractProperties) HasOwner() bool { return false } -// GetStatus returns the Status field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ContractProperties) GetStatus() *string { +// GetRegDomain returns the RegDomain field value +// If the value is explicit nil, nil is returned +func (o *ContractProperties) GetRegDomain() *string { if o == nil { return nil } - return o.Status + return o.RegDomain } -// GetStatusOk returns a tuple with the Status field value +// GetRegDomainOk returns a tuple with the RegDomain field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ContractProperties) GetStatusOk() (*string, bool) { +func (o *ContractProperties) GetRegDomainOk() (*string, bool) { if o == nil { return nil, false } - return o.Status, true + return o.RegDomain, true } -// SetStatus sets field value -func (o *ContractProperties) SetStatus(v string) { +// SetRegDomain sets field value +func (o *ContractProperties) SetRegDomain(v string) { - o.Status = &v + o.RegDomain = &v } -// HasStatus returns a boolean if a field has been set. -func (o *ContractProperties) HasStatus() bool { - if o != nil && o.Status != nil { +// HasRegDomain returns a boolean if a field has been set. 
+func (o *ContractProperties) HasRegDomain() bool { + if o != nil && o.RegDomain != nil { return true } return false } -// GetRegDomain returns the RegDomain field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ContractProperties) GetRegDomain() *string { +// GetResourceLimits returns the ResourceLimits field value +// If the value is explicit nil, nil is returned +func (o *ContractProperties) GetResourceLimits() *ResourceLimits { if o == nil { return nil } - return o.RegDomain + return o.ResourceLimits } -// GetRegDomainOk returns a tuple with the RegDomain field value +// GetResourceLimitsOk returns a tuple with the ResourceLimits field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ContractProperties) GetRegDomainOk() (*string, bool) { +func (o *ContractProperties) GetResourceLimitsOk() (*ResourceLimits, bool) { if o == nil { return nil, false } - return o.RegDomain, true + return o.ResourceLimits, true } -// SetRegDomain sets field value -func (o *ContractProperties) SetRegDomain(v string) { +// SetResourceLimits sets field value +func (o *ContractProperties) SetResourceLimits(v ResourceLimits) { - o.RegDomain = &v + o.ResourceLimits = &v } -// HasRegDomain returns a boolean if a field has been set. -func (o *ContractProperties) HasRegDomain() bool { - if o != nil && o.RegDomain != nil { +// HasResourceLimits returns a boolean if a field has been set. +func (o *ContractProperties) HasResourceLimits() bool { + if o != nil && o.ResourceLimits != nil { return true } return false } -// GetResourceLimits returns the ResourceLimits field value -// If the value is explicit nil, the zero value for ResourceLimits will be returned -func (o *ContractProperties) GetResourceLimits() *ResourceLimits { +// GetStatus returns the Status field value +// If the value is explicit nil, nil is returned +func (o *ContractProperties) GetStatus() *string { if o == nil { return nil } - return o.ResourceLimits + return o.Status } -// GetResourceLimitsOk returns a tuple with the ResourceLimits field value +// GetStatusOk returns a tuple with the Status field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ContractProperties) GetResourceLimitsOk() (*ResourceLimits, bool) { +func (o *ContractProperties) GetStatusOk() (*string, bool) { if o == nil { return nil, false } - return o.ResourceLimits, true + return o.Status, true } -// SetResourceLimits sets field value -func (o *ContractProperties) SetResourceLimits(v ResourceLimits) { +// SetStatus sets field value +func (o *ContractProperties) SetStatus(v string) { - o.ResourceLimits = &v + o.Status = &v } -// HasResourceLimits returns a boolean if a field has been set. -func (o *ContractProperties) HasResourceLimits() bool { - if o != nil && o.ResourceLimits != nil { +// HasStatus returns a boolean if a field has been set. 
+func (o *ContractProperties) HasStatus() bool { + if o != nil && o.Status != nil { return true } @@ -240,18 +240,23 @@ func (o ContractProperties) MarshalJSON() ([]byte, error) { if o.ContractNumber != nil { toSerialize["contractNumber"] = o.ContractNumber } + if o.Owner != nil { toSerialize["owner"] = o.Owner } - if o.Status != nil { - toSerialize["status"] = o.Status - } + if o.RegDomain != nil { toSerialize["regDomain"] = o.RegDomain } + if o.ResourceLimits != nil { toSerialize["resourceLimits"] = o.ResourceLimits } + + if o.Status != nil { + toSerialize["status"] = o.Status + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contracts.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contracts.go index c6fc46e31..61e2f9aa9 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contracts.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contracts.go @@ -16,14 +16,14 @@ import ( // Contracts struct for Contracts type Contracts struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Contract `json:"items,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewContracts instantiates a new Contracts object @@ -44,152 +44,152 @@ func NewContractsWithDefaults() *Contracts { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Contracts) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Contracts) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Contracts) GetIdOk() (*string, bool) { +func (o *Contracts) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Contracts) SetId(v string) { +// SetHref sets field value +func (o *Contracts) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Contracts) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Contracts) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Contracts) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Contracts) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Contracts) GetTypeOk() (*Type, bool) { +func (o *Contracts) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Contracts) SetType(v Type) { +// SetId sets field value +func (o *Contracts) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Contracts) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Contracts) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Contracts) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *Contracts) GetItems() *[]Contract { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Contracts) GetHrefOk() (*string, bool) { +func (o *Contracts) GetItemsOk() (*[]Contract, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *Contracts) SetHref(v string) { +// SetItems sets field value +func (o *Contracts) SetItems(v []Contract) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *Contracts) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *Contracts) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Contract will be returned -func (o *Contracts) GetItems() *[]Contract { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Contracts) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Contracts) GetItemsOk() (*[]Contract, bool) { +func (o *Contracts) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *Contracts) SetItems(v []Contract) { +// SetType sets field value +func (o *Contracts) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *Contracts) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Contracts) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *Contracts) HasItems() bool { func (o Contracts) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_cpu_architecture_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_cpu_architecture_properties.go index 80c6b6c24..c86a3899d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_cpu_architecture_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_cpu_architecture_properties.go @@ -45,7 +45,7 @@ func NewCpuArchitecturePropertiesWithDefaults() *CpuArchitectureProperties { } // GetCpuFamily returns the CpuFamily field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *CpuArchitectureProperties) GetCpuFamily() *string { if o == nil { return nil @@ -83,7 +83,7 @@ func (o *CpuArchitectureProperties) HasCpuFamily() bool { } // GetMaxCores returns the MaxCores field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *CpuArchitectureProperties) GetMaxCores() *int32 { if o == nil { return nil @@ -121,7 +121,7 @@ func (o *CpuArchitectureProperties) HasMaxCores() bool { } // GetMaxRam returns the MaxRam field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *CpuArchitectureProperties) GetMaxRam() *int32 { if o == nil { return nil @@ -159,7 +159,7 @@ func (o *CpuArchitectureProperties) HasMaxRam() bool { } // GetVendor returns the Vendor field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *CpuArchitectureProperties) GetVendor() *string { if o == nil { return nil @@ -201,15 +201,19 @@ func (o CpuArchitectureProperties) MarshalJSON() ([]byte, error) { if o.CpuFamily != nil { toSerialize["cpuFamily"] = o.CpuFamily } + if o.MaxCores != nil { toSerialize["maxCores"] = o.MaxCores } + if o.MaxRam != nil { toSerialize["maxRam"] = o.MaxRam } + if o.Vendor != nil { toSerialize["vendor"] = o.Vendor } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_data_center_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_data_center_entities.go index 65fa3ee3d..2ac821f85 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_data_center_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_data_center_entities.go @@ -16,12 +16,12 @@ import ( // DataCenterEntities struct for DataCenterEntities type DataCenterEntities struct { - Servers *Servers `json:"servers,omitempty"` - Volumes *Volumes `json:"volumes,omitempty"` - Loadbalancers *Loadbalancers `json:"loadbalancers,omitempty"` Lans *Lans `json:"lans,omitempty"` - Networkloadbalancers *NetworkLoadBalancers `json:"networkloadbalancers,omitempty"` + Loadbalancers *Loadbalancers `json:"loadbalancers,omitempty"` Natgateways *NatGateways `json:"natgateways,omitempty"` + 
Networkloadbalancers *NetworkLoadBalancers `json:"networkloadbalancers,omitempty"` + Servers *Servers `json:"servers,omitempty"` + Volumes *Volumes `json:"volumes,omitempty"` } // NewDataCenterEntities instantiates a new DataCenterEntities object @@ -42,228 +42,228 @@ func NewDataCenterEntitiesWithDefaults() *DataCenterEntities { return &this } -// GetServers returns the Servers field value -// If the value is explicit nil, the zero value for Servers will be returned -func (o *DataCenterEntities) GetServers() *Servers { +// GetLans returns the Lans field value +// If the value is explicit nil, nil is returned +func (o *DataCenterEntities) GetLans() *Lans { if o == nil { return nil } - return o.Servers + return o.Lans } -// GetServersOk returns a tuple with the Servers field value +// GetLansOk returns a tuple with the Lans field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DataCenterEntities) GetServersOk() (*Servers, bool) { +func (o *DataCenterEntities) GetLansOk() (*Lans, bool) { if o == nil { return nil, false } - return o.Servers, true + return o.Lans, true } -// SetServers sets field value -func (o *DataCenterEntities) SetServers(v Servers) { +// SetLans sets field value +func (o *DataCenterEntities) SetLans(v Lans) { - o.Servers = &v + o.Lans = &v } -// HasServers returns a boolean if a field has been set. -func (o *DataCenterEntities) HasServers() bool { - if o != nil && o.Servers != nil { +// HasLans returns a boolean if a field has been set. +func (o *DataCenterEntities) HasLans() bool { + if o != nil && o.Lans != nil { return true } return false } -// GetVolumes returns the Volumes field value -// If the value is explicit nil, the zero value for Volumes will be returned -func (o *DataCenterEntities) GetVolumes() *Volumes { +// GetLoadbalancers returns the Loadbalancers field value +// If the value is explicit nil, nil is returned +func (o *DataCenterEntities) GetLoadbalancers() *Loadbalancers { if o == nil { return nil } - return o.Volumes + return o.Loadbalancers } -// GetVolumesOk returns a tuple with the Volumes field value +// GetLoadbalancersOk returns a tuple with the Loadbalancers field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DataCenterEntities) GetVolumesOk() (*Volumes, bool) { +func (o *DataCenterEntities) GetLoadbalancersOk() (*Loadbalancers, bool) { if o == nil { return nil, false } - return o.Volumes, true + return o.Loadbalancers, true } -// SetVolumes sets field value -func (o *DataCenterEntities) SetVolumes(v Volumes) { +// SetLoadbalancers sets field value +func (o *DataCenterEntities) SetLoadbalancers(v Loadbalancers) { - o.Volumes = &v + o.Loadbalancers = &v } -// HasVolumes returns a boolean if a field has been set. -func (o *DataCenterEntities) HasVolumes() bool { - if o != nil && o.Volumes != nil { +// HasLoadbalancers returns a boolean if a field has been set. 
+func (o *DataCenterEntities) HasLoadbalancers() bool { + if o != nil && o.Loadbalancers != nil { return true } return false } -// GetLoadbalancers returns the Loadbalancers field value -// If the value is explicit nil, the zero value for Loadbalancers will be returned -func (o *DataCenterEntities) GetLoadbalancers() *Loadbalancers { +// GetNatgateways returns the Natgateways field value +// If the value is explicit nil, nil is returned +func (o *DataCenterEntities) GetNatgateways() *NatGateways { if o == nil { return nil } - return o.Loadbalancers + return o.Natgateways } -// GetLoadbalancersOk returns a tuple with the Loadbalancers field value +// GetNatgatewaysOk returns a tuple with the Natgateways field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DataCenterEntities) GetLoadbalancersOk() (*Loadbalancers, bool) { +func (o *DataCenterEntities) GetNatgatewaysOk() (*NatGateways, bool) { if o == nil { return nil, false } - return o.Loadbalancers, true + return o.Natgateways, true } -// SetLoadbalancers sets field value -func (o *DataCenterEntities) SetLoadbalancers(v Loadbalancers) { +// SetNatgateways sets field value +func (o *DataCenterEntities) SetNatgateways(v NatGateways) { - o.Loadbalancers = &v + o.Natgateways = &v } -// HasLoadbalancers returns a boolean if a field has been set. -func (o *DataCenterEntities) HasLoadbalancers() bool { - if o != nil && o.Loadbalancers != nil { +// HasNatgateways returns a boolean if a field has been set. +func (o *DataCenterEntities) HasNatgateways() bool { + if o != nil && o.Natgateways != nil { return true } return false } -// GetLans returns the Lans field value -// If the value is explicit nil, the zero value for Lans will be returned -func (o *DataCenterEntities) GetLans() *Lans { +// GetNetworkloadbalancers returns the Networkloadbalancers field value +// If the value is explicit nil, nil is returned +func (o *DataCenterEntities) GetNetworkloadbalancers() *NetworkLoadBalancers { if o == nil { return nil } - return o.Lans + return o.Networkloadbalancers } -// GetLansOk returns a tuple with the Lans field value +// GetNetworkloadbalancersOk returns a tuple with the Networkloadbalancers field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DataCenterEntities) GetLansOk() (*Lans, bool) { +func (o *DataCenterEntities) GetNetworkloadbalancersOk() (*NetworkLoadBalancers, bool) { if o == nil { return nil, false } - return o.Lans, true + return o.Networkloadbalancers, true } -// SetLans sets field value -func (o *DataCenterEntities) SetLans(v Lans) { +// SetNetworkloadbalancers sets field value +func (o *DataCenterEntities) SetNetworkloadbalancers(v NetworkLoadBalancers) { - o.Lans = &v + o.Networkloadbalancers = &v } -// HasLans returns a boolean if a field has been set. -func (o *DataCenterEntities) HasLans() bool { - if o != nil && o.Lans != nil { +// HasNetworkloadbalancers returns a boolean if a field has been set. 
+func (o *DataCenterEntities) HasNetworkloadbalancers() bool { + if o != nil && o.Networkloadbalancers != nil { return true } return false } -// GetNetworkloadbalancers returns the Networkloadbalancers field value -// If the value is explicit nil, the zero value for NetworkLoadBalancers will be returned -func (o *DataCenterEntities) GetNetworkloadbalancers() *NetworkLoadBalancers { +// GetServers returns the Servers field value +// If the value is explicit nil, nil is returned +func (o *DataCenterEntities) GetServers() *Servers { if o == nil { return nil } - return o.Networkloadbalancers + return o.Servers } -// GetNetworkloadbalancersOk returns a tuple with the Networkloadbalancers field value +// GetServersOk returns a tuple with the Servers field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DataCenterEntities) GetNetworkloadbalancersOk() (*NetworkLoadBalancers, bool) { +func (o *DataCenterEntities) GetServersOk() (*Servers, bool) { if o == nil { return nil, false } - return o.Networkloadbalancers, true + return o.Servers, true } -// SetNetworkloadbalancers sets field value -func (o *DataCenterEntities) SetNetworkloadbalancers(v NetworkLoadBalancers) { +// SetServers sets field value +func (o *DataCenterEntities) SetServers(v Servers) { - o.Networkloadbalancers = &v + o.Servers = &v } -// HasNetworkloadbalancers returns a boolean if a field has been set. -func (o *DataCenterEntities) HasNetworkloadbalancers() bool { - if o != nil && o.Networkloadbalancers != nil { +// HasServers returns a boolean if a field has been set. +func (o *DataCenterEntities) HasServers() bool { + if o != nil && o.Servers != nil { return true } return false } -// GetNatgateways returns the Natgateways field value -// If the value is explicit nil, the zero value for NatGateways will be returned -func (o *DataCenterEntities) GetNatgateways() *NatGateways { +// GetVolumes returns the Volumes field value +// If the value is explicit nil, nil is returned +func (o *DataCenterEntities) GetVolumes() *Volumes { if o == nil { return nil } - return o.Natgateways + return o.Volumes } -// GetNatgatewaysOk returns a tuple with the Natgateways field value +// GetVolumesOk returns a tuple with the Volumes field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DataCenterEntities) GetNatgatewaysOk() (*NatGateways, bool) { +func (o *DataCenterEntities) GetVolumesOk() (*Volumes, bool) { if o == nil { return nil, false } - return o.Natgateways, true + return o.Volumes, true } -// SetNatgateways sets field value -func (o *DataCenterEntities) SetNatgateways(v NatGateways) { +// SetVolumes sets field value +func (o *DataCenterEntities) SetVolumes(v Volumes) { - o.Natgateways = &v + o.Volumes = &v } -// HasNatgateways returns a boolean if a field has been set. -func (o *DataCenterEntities) HasNatgateways() bool { - if o != nil && o.Natgateways != nil { +// HasVolumes returns a boolean if a field has been set. 
+func (o *DataCenterEntities) HasVolumes() bool { + if o != nil && o.Volumes != nil { return true } @@ -272,24 +272,30 @@ func (o *DataCenterEntities) HasNatgateways() bool { func (o DataCenterEntities) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Servers != nil { - toSerialize["servers"] = o.Servers - } - if o.Volumes != nil { - toSerialize["volumes"] = o.Volumes + if o.Lans != nil { + toSerialize["lans"] = o.Lans } + if o.Loadbalancers != nil { toSerialize["loadbalancers"] = o.Loadbalancers } - if o.Lans != nil { - toSerialize["lans"] = o.Lans + + if o.Natgateways != nil { + toSerialize["natgateways"] = o.Natgateways } + if o.Networkloadbalancers != nil { toSerialize["networkloadbalancers"] = o.Networkloadbalancers } - if o.Natgateways != nil { - toSerialize["natgateways"] = o.Natgateways + + if o.Servers != nil { + toSerialize["servers"] = o.Servers } + + if o.Volumes != nil { + toSerialize["volumes"] = o.Volumes + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter.go index 45a4e9d7d..9b53f656c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter.go @@ -16,15 +16,15 @@ import ( // Datacenter struct for Datacenter type Datacenter struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Entities *DataCenterEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *DatacenterProperties `json:"properties"` - Entities *DataCenterEntities `json:"entities,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewDatacenter instantiates a new Datacenter object @@ -47,114 +47,114 @@ func NewDatacenterWithDefaults() *Datacenter { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Datacenter) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *Datacenter) GetEntities() *DataCenterEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenter) GetIdOk() (*string, bool) { +func (o *Datacenter) GetEntitiesOk() (*DataCenterEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *Datacenter) SetId(v string) { +// SetEntities sets field value +func (o *Datacenter) SetEntities(v DataCenterEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *Datacenter) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. 
+func (o *Datacenter) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Datacenter) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Datacenter) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenter) GetTypeOk() (*Type, bool) { +func (o *Datacenter) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Datacenter) SetType(v Type) { +// SetHref sets field value +func (o *Datacenter) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Datacenter) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Datacenter) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Datacenter) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Datacenter) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenter) GetHrefOk() (*string, bool) { +func (o *Datacenter) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Datacenter) SetHref(v string) { +// SetId sets field value +func (o *Datacenter) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Datacenter) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *Datacenter) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *Datacenter) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *Datacenter) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *Datacenter) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for DatacenterProperties will be returned +// If the value is explicit nil, nil is returned func (o *Datacenter) GetProperties() *DatacenterProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *Datacenter) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for DataCenterEntities will be returned -func (o *Datacenter) GetEntities() *DataCenterEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Datacenter) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenter) GetEntitiesOk() (*DataCenterEntities, bool) { +func (o *Datacenter) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *Datacenter) SetEntities(v DataCenterEntities) { +// SetType sets field value +func (o *Datacenter) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *Datacenter) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Datacenter) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *Datacenter) HasEntities() bool { func (o Datacenter) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_element_metadata.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_element_metadata.go index 3034ac67d..d0d262ff5 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_element_metadata.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_element_metadata.go @@ -17,20 +17,20 @@ import ( // DatacenterElementMetadata struct for DatacenterElementMetadata type DatacenterElementMetadata struct { - // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. - Etag *string `json:"etag,omitempty"` - // The last time the resource was created. - CreatedDate *IonosTime // The user who created the resource. CreatedBy *string `json:"createdBy,omitempty"` // The unique ID of the user who created the resource. CreatedByUserId *string `json:"createdByUserId,omitempty"` - // The last time the resource was modified. - LastModifiedDate *IonosTime + // The last time the resource was created. + CreatedDate *IonosTime + // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. + Etag *string `json:"etag,omitempty"` // The user who last modified the resource. LastModifiedBy *string `json:"lastModifiedBy,omitempty"` // The unique ID of the user who last modified the resource. LastModifiedByUserId *string `json:"lastModifiedByUserId,omitempty"` + // The last time the resource was modified. + LastModifiedDate *IonosTime // State of the resource. 
*AVAILABLE* There are no pending modification requests for this item; *BUSY* There is at least one modification request pending and all following requests will be queued; *INACTIVE* Resource has been de-provisioned; *DEPLOYING* Resource state DEPLOYING - relevant for Kubernetes cluster/nodepool; *ACTIVE* Resource state ACTIVE - relevant for Kubernetes cluster/nodepool; *FAILED* Resource state FAILED - relevant for Kubernetes cluster/nodepool; *SUSPENDED* Resource state SUSPENDED - relevant for Kubernetes cluster/nodepool; *FAILED_SUSPENDED* Resource state FAILED_SUSPENDED - relevant for Kubernetes cluster; *UPDATING* Resource state UPDATING - relevant for Kubernetes cluster/nodepool; *FAILED_UPDATING* Resource state FAILED_UPDATING - relevant for Kubernetes cluster/nodepool; *DESTROYING* Resource state DESTROYING - relevant for Kubernetes cluster; *FAILED_DESTROYING* Resource state FAILED_DESTROYING - relevant for Kubernetes cluster/nodepool; *TERMINATED* Resource state TERMINATED - relevant for Kubernetes cluster/nodepool; *HIBERNATING* Resource state HIBERNATING - relevant for Kubernetes cluster/nodepool; *FAILED_HIBERNATING* Resource state FAILED_HIBERNATING - relevant for Kubernetes cluster/nodepool; *MAINTENANCE* Resource state MAINTENANCE - relevant for Kubernetes cluster/nodepool; *FAILED_HIBERNATING* Resource state FAILED_HIBERNATING - relevant for Kubernetes cluster/nodepool. State *string `json:"state,omitempty"` } @@ -53,91 +53,8 @@ func NewDatacenterElementMetadataWithDefaults() *DatacenterElementMetadata { return &this } -// GetEtag returns the Etag field value -// If the value is explicit nil, the zero value for string will be returned -func (o *DatacenterElementMetadata) GetEtag() *string { - if o == nil { - return nil - } - - return o.Etag - -} - -// GetEtagOk returns a tuple with the Etag field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DatacenterElementMetadata) GetEtagOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Etag, true -} - -// SetEtag sets field value -func (o *DatacenterElementMetadata) SetEtag(v string) { - - o.Etag = &v - -} - -// HasEtag returns a boolean if a field has been set. -func (o *DatacenterElementMetadata) HasEtag() bool { - if o != nil && o.Etag != nil { - return true - } - - return false -} - -// GetCreatedDate returns the CreatedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned -func (o *DatacenterElementMetadata) GetCreatedDate() *time.Time { - if o == nil { - return nil - } - - if o.CreatedDate == nil { - return nil - } - return &o.CreatedDate.Time - -} - -// GetCreatedDateOk returns a tuple with the CreatedDate field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DatacenterElementMetadata) GetCreatedDateOk() (*time.Time, bool) { - if o == nil { - return nil, false - } - - if o.CreatedDate == nil { - return nil, false - } - return &o.CreatedDate.Time, true - -} - -// SetCreatedDate sets field value -func (o *DatacenterElementMetadata) SetCreatedDate(v time.Time) { - - o.CreatedDate = &IonosTime{v} - -} - -// HasCreatedDate returns a boolean if a field has been set. 
-func (o *DatacenterElementMetadata) HasCreatedDate() bool { - if o != nil && o.CreatedDate != nil { - return true - } - - return false -} - // GetCreatedBy returns the CreatedBy field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *DatacenterElementMetadata) GetCreatedBy() *string { if o == nil { return nil @@ -175,7 +92,7 @@ func (o *DatacenterElementMetadata) HasCreatedBy() bool { } // GetCreatedByUserId returns the CreatedByUserId field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *DatacenterElementMetadata) GetCreatedByUserId() *string { if o == nil { return nil @@ -212,45 +129,83 @@ func (o *DatacenterElementMetadata) HasCreatedByUserId() bool { return false } -// GetLastModifiedDate returns the LastModifiedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned -func (o *DatacenterElementMetadata) GetLastModifiedDate() *time.Time { +// GetCreatedDate returns the CreatedDate field value +// If the value is explicit nil, nil is returned +func (o *DatacenterElementMetadata) GetCreatedDate() *time.Time { if o == nil { return nil } - if o.LastModifiedDate == nil { + if o.CreatedDate == nil { return nil } - return &o.LastModifiedDate.Time + return &o.CreatedDate.Time } -// GetLastModifiedDateOk returns a tuple with the LastModifiedDate field value +// GetCreatedDateOk returns a tuple with the CreatedDate field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DatacenterElementMetadata) GetLastModifiedDateOk() (*time.Time, bool) { +func (o *DatacenterElementMetadata) GetCreatedDateOk() (*time.Time, bool) { if o == nil { return nil, false } - if o.LastModifiedDate == nil { + if o.CreatedDate == nil { return nil, false } - return &o.LastModifiedDate.Time, true + return &o.CreatedDate.Time, true } -// SetLastModifiedDate sets field value -func (o *DatacenterElementMetadata) SetLastModifiedDate(v time.Time) { +// SetCreatedDate sets field value +func (o *DatacenterElementMetadata) SetCreatedDate(v time.Time) { - o.LastModifiedDate = &IonosTime{v} + o.CreatedDate = &IonosTime{v} } -// HasLastModifiedDate returns a boolean if a field has been set. -func (o *DatacenterElementMetadata) HasLastModifiedDate() bool { - if o != nil && o.LastModifiedDate != nil { +// HasCreatedDate returns a boolean if a field has been set. +func (o *DatacenterElementMetadata) HasCreatedDate() bool { + if o != nil && o.CreatedDate != nil { + return true + } + + return false +} + +// GetEtag returns the Etag field value +// If the value is explicit nil, nil is returned +func (o *DatacenterElementMetadata) GetEtag() *string { + if o == nil { + return nil + } + + return o.Etag + +} + +// GetEtagOk returns a tuple with the Etag field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *DatacenterElementMetadata) GetEtagOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Etag, true +} + +// SetEtag sets field value +func (o *DatacenterElementMetadata) SetEtag(v string) { + + o.Etag = &v + +} + +// HasEtag returns a boolean if a field has been set. 
+func (o *DatacenterElementMetadata) HasEtag() bool { + if o != nil && o.Etag != nil { return true } @@ -258,7 +213,7 @@ func (o *DatacenterElementMetadata) HasLastModifiedDate() bool { } // GetLastModifiedBy returns the LastModifiedBy field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *DatacenterElementMetadata) GetLastModifiedBy() *string { if o == nil { return nil @@ -296,7 +251,7 @@ func (o *DatacenterElementMetadata) HasLastModifiedBy() bool { } // GetLastModifiedByUserId returns the LastModifiedByUserId field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *DatacenterElementMetadata) GetLastModifiedByUserId() *string { if o == nil { return nil @@ -333,8 +288,53 @@ func (o *DatacenterElementMetadata) HasLastModifiedByUserId() bool { return false } +// GetLastModifiedDate returns the LastModifiedDate field value +// If the value is explicit nil, nil is returned +func (o *DatacenterElementMetadata) GetLastModifiedDate() *time.Time { + if o == nil { + return nil + } + + if o.LastModifiedDate == nil { + return nil + } + return &o.LastModifiedDate.Time + +} + +// GetLastModifiedDateOk returns a tuple with the LastModifiedDate field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *DatacenterElementMetadata) GetLastModifiedDateOk() (*time.Time, bool) { + if o == nil { + return nil, false + } + + if o.LastModifiedDate == nil { + return nil, false + } + return &o.LastModifiedDate.Time, true + +} + +// SetLastModifiedDate sets field value +func (o *DatacenterElementMetadata) SetLastModifiedDate(v time.Time) { + + o.LastModifiedDate = &IonosTime{v} + +} + +// HasLastModifiedDate returns a boolean if a field has been set. 
+func (o *DatacenterElementMetadata) HasLastModifiedDate() bool { + if o != nil && o.LastModifiedDate != nil { + return true + } + + return false +} + // GetState returns the State field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *DatacenterElementMetadata) GetState() *string { if o == nil { return nil @@ -373,30 +373,38 @@ func (o *DatacenterElementMetadata) HasState() bool { func (o DatacenterElementMetadata) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Etag != nil { - toSerialize["etag"] = o.Etag - } - if o.CreatedDate != nil { - toSerialize["createdDate"] = o.CreatedDate - } if o.CreatedBy != nil { toSerialize["createdBy"] = o.CreatedBy } + if o.CreatedByUserId != nil { toSerialize["createdByUserId"] = o.CreatedByUserId } - if o.LastModifiedDate != nil { - toSerialize["lastModifiedDate"] = o.LastModifiedDate + + if o.CreatedDate != nil { + toSerialize["createdDate"] = o.CreatedDate } + + if o.Etag != nil { + toSerialize["etag"] = o.Etag + } + if o.LastModifiedBy != nil { toSerialize["lastModifiedBy"] = o.LastModifiedBy } + if o.LastModifiedByUserId != nil { toSerialize["lastModifiedByUserId"] = o.LastModifiedByUserId } + + if o.LastModifiedDate != nil { + toSerialize["lastModifiedDate"] = o.LastModifiedDate + } + if o.State != nil { toSerialize["state"] = o.State } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_properties.go index 7ea457242..67f0763fe 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_properties.go @@ -16,20 +16,23 @@ import ( // DatacenterProperties struct for DatacenterProperties type DatacenterProperties struct { - // The name of the resource. - Name *string `json:"name,omitempty"` + // Array of features and CPU families available in a location + CpuArchitecture *[]CpuArchitectureProperties `json:"cpuArchitecture,omitempty"` // A description for the datacenter, such as staging, production. Description *string `json:"description,omitempty"` - // The physical location where the datacenter will be created. This will be where all of your servers live. Property cannot be modified after datacenter creation (disallowed in update requests). - Location *string `json:"location"` - // The version of the data center; incremented with every change. - Version *int32 `json:"version,omitempty"` // List of features supported by the location where this data center is provisioned. Features *[]string `json:"features,omitempty"` + // [The IPv6 feature is in beta phase and not ready for production usage.] This value is either 'null' or contains an automatically-assigned /56 IPv6 CIDR block if IPv6 is enabled on this virtual data center. It can neither be changed nor removed. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetIpv6CidrBlockNil` + Ipv6CidrBlock *string `json:"ipv6CidrBlock,omitempty"` + // The physical location where the datacenter will be created. This will be where all of your servers live. Property cannot be modified after datacenter creation (disallowed in update requests). + Location *string `json:"location"` + // The name of the resource. 
+ Name *string `json:"name,omitempty"` // Boolean value representing if the data center requires extra protection, such as two-step verification. SecAuthProtection *bool `json:"secAuthProtection,omitempty"` - // Array of features and CPU families available in a location - CpuArchitecture *[]CpuArchitectureProperties `json:"cpuArchitecture,omitempty"` + // The version of the data center; incremented with every change. + Version *int32 `json:"version,omitempty"` } // NewDatacenterProperties instantiates a new DatacenterProperties object @@ -52,38 +55,38 @@ func NewDatacenterPropertiesWithDefaults() *DatacenterProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *DatacenterProperties) GetName() *string { +// GetCpuArchitecture returns the CpuArchitecture field value +// If the value is explicit nil, nil is returned +func (o *DatacenterProperties) GetCpuArchitecture() *[]CpuArchitectureProperties { if o == nil { return nil } - return o.Name + return o.CpuArchitecture } -// GetNameOk returns a tuple with the Name field value +// GetCpuArchitectureOk returns a tuple with the CpuArchitecture field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DatacenterProperties) GetNameOk() (*string, bool) { +func (o *DatacenterProperties) GetCpuArchitectureOk() (*[]CpuArchitectureProperties, bool) { if o == nil { return nil, false } - return o.Name, true + return o.CpuArchitecture, true } -// SetName sets field value -func (o *DatacenterProperties) SetName(v string) { +// SetCpuArchitecture sets field value +func (o *DatacenterProperties) SetCpuArchitecture(v []CpuArchitectureProperties) { - o.Name = &v + o.CpuArchitecture = &v } -// HasName returns a boolean if a field has been set. -func (o *DatacenterProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasCpuArchitecture returns a boolean if a field has been set. +func (o *DatacenterProperties) HasCpuArchitecture() bool { + if o != nil && o.CpuArchitecture != nil { return true } @@ -91,7 +94,7 @@ func (o *DatacenterProperties) HasName() bool { } // GetDescription returns the Description field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *DatacenterProperties) GetDescription() *string { if o == nil { return nil @@ -128,114 +131,157 @@ func (o *DatacenterProperties) HasDescription() bool { return false } -// GetLocation returns the Location field value -// If the value is explicit nil, the zero value for string will be returned -func (o *DatacenterProperties) GetLocation() *string { +// GetFeatures returns the Features field value +// If the value is explicit nil, nil is returned +func (o *DatacenterProperties) GetFeatures() *[]string { if o == nil { return nil } - return o.Location + return o.Features } -// GetLocationOk returns a tuple with the Location field value +// GetFeaturesOk returns a tuple with the Features field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DatacenterProperties) GetLocationOk() (*string, bool) { +func (o *DatacenterProperties) GetFeaturesOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Location, true + return o.Features, true } -// SetLocation sets field value -func (o *DatacenterProperties) SetLocation(v string) { +// SetFeatures sets field value +func (o *DatacenterProperties) SetFeatures(v []string) { - o.Location = &v + o.Features = &v } -// HasLocation returns a boolean if a field has been set. -func (o *DatacenterProperties) HasLocation() bool { - if o != nil && o.Location != nil { +// HasFeatures returns a boolean if a field has been set. +func (o *DatacenterProperties) HasFeatures() bool { + if o != nil && o.Features != nil { return true } return false } -// GetVersion returns the Version field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *DatacenterProperties) GetVersion() *int32 { +// GetIpv6CidrBlock returns the Ipv6CidrBlock field value +// If the value is explicit nil, nil is returned +func (o *DatacenterProperties) GetIpv6CidrBlock() *string { if o == nil { return nil } - return o.Version + return o.Ipv6CidrBlock } -// GetVersionOk returns a tuple with the Version field value +// GetIpv6CidrBlockOk returns a tuple with the Ipv6CidrBlock field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DatacenterProperties) GetVersionOk() (*int32, bool) { +func (o *DatacenterProperties) GetIpv6CidrBlockOk() (*string, bool) { if o == nil { return nil, false } - return o.Version, true + return o.Ipv6CidrBlock, true } -// SetVersion sets field value -func (o *DatacenterProperties) SetVersion(v int32) { +// SetIpv6CidrBlock sets field value +func (o *DatacenterProperties) SetIpv6CidrBlock(v string) { - o.Version = &v + o.Ipv6CidrBlock = &v } -// HasVersion returns a boolean if a field has been set. -func (o *DatacenterProperties) HasVersion() bool { - if o != nil && o.Version != nil { +// sets Ipv6CidrBlock to the explicit address that will be encoded as nil when marshaled +func (o *DatacenterProperties) SetIpv6CidrBlockNil() { + o.Ipv6CidrBlock = &Nilstring +} + +// HasIpv6CidrBlock returns a boolean if a field has been set. +func (o *DatacenterProperties) HasIpv6CidrBlock() bool { + if o != nil && o.Ipv6CidrBlock != nil { return true } return false } -// GetFeatures returns the Features field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *DatacenterProperties) GetFeatures() *[]string { +// GetLocation returns the Location field value +// If the value is explicit nil, nil is returned +func (o *DatacenterProperties) GetLocation() *string { if o == nil { return nil } - return o.Features + return o.Location } -// GetFeaturesOk returns a tuple with the Features field value +// GetLocationOk returns a tuple with the Location field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DatacenterProperties) GetFeaturesOk() (*[]string, bool) { +func (o *DatacenterProperties) GetLocationOk() (*string, bool) { if o == nil { return nil, false } - return o.Features, true + return o.Location, true } -// SetFeatures sets field value -func (o *DatacenterProperties) SetFeatures(v []string) { +// SetLocation sets field value +func (o *DatacenterProperties) SetLocation(v string) { - o.Features = &v + o.Location = &v } -// HasFeatures returns a boolean if a field has been set. -func (o *DatacenterProperties) HasFeatures() bool { - if o != nil && o.Features != nil { +// HasLocation returns a boolean if a field has been set. +func (o *DatacenterProperties) HasLocation() bool { + if o != nil && o.Location != nil { + return true + } + + return false +} + +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *DatacenterProperties) GetName() *string { + if o == nil { + return nil + } + + return o.Name + +} + +// GetNameOk returns a tuple with the Name field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *DatacenterProperties) GetNameOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Name, true +} + +// SetName sets field value +func (o *DatacenterProperties) SetName(v string) { + + o.Name = &v + +} + +// HasName returns a boolean if a field has been set. +func (o *DatacenterProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -243,7 +289,7 @@ func (o *DatacenterProperties) HasFeatures() bool { } // GetSecAuthProtection returns the SecAuthProtection field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *DatacenterProperties) GetSecAuthProtection() *bool { if o == nil { return nil @@ -280,38 +326,38 @@ func (o *DatacenterProperties) HasSecAuthProtection() bool { return false } -// GetCpuArchitecture returns the CpuArchitecture field value -// If the value is explicit nil, the zero value for []CpuArchitectureProperties will be returned -func (o *DatacenterProperties) GetCpuArchitecture() *[]CpuArchitectureProperties { +// GetVersion returns the Version field value +// If the value is explicit nil, nil is returned +func (o *DatacenterProperties) GetVersion() *int32 { if o == nil { return nil } - return o.CpuArchitecture + return o.Version } -// GetCpuArchitectureOk returns a tuple with the CpuArchitecture field value +// GetVersionOk returns a tuple with the Version field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *DatacenterProperties) GetCpuArchitectureOk() (*[]CpuArchitectureProperties, bool) { +func (o *DatacenterProperties) GetVersionOk() (*int32, bool) { if o == nil { return nil, false } - return o.CpuArchitecture, true + return o.Version, true } -// SetCpuArchitecture sets field value -func (o *DatacenterProperties) SetCpuArchitecture(v []CpuArchitectureProperties) { +// SetVersion sets field value +func (o *DatacenterProperties) SetVersion(v int32) { - o.CpuArchitecture = &v + o.Version = &v } -// HasCpuArchitecture returns a boolean if a field has been set. -func (o *DatacenterProperties) HasCpuArchitecture() bool { - if o != nil && o.CpuArchitecture != nil { +// HasVersion returns a boolean if a field has been set. 
+func (o *DatacenterProperties) HasVersion() bool { + if o != nil && o.Version != nil { return true } @@ -320,27 +366,39 @@ func (o *DatacenterProperties) HasCpuArchitecture() bool { func (o DatacenterProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.CpuArchitecture != nil { + toSerialize["cpuArchitecture"] = o.CpuArchitecture } + if o.Description != nil { toSerialize["description"] = o.Description } + + if o.Features != nil { + toSerialize["features"] = o.Features + } + + if o.Ipv6CidrBlock == &Nilstring { + toSerialize["ipv6CidrBlock"] = nil + } else if o.Ipv6CidrBlock != nil { + toSerialize["ipv6CidrBlock"] = o.Ipv6CidrBlock + } if o.Location != nil { toSerialize["location"] = o.Location } - if o.Version != nil { - toSerialize["version"] = o.Version - } - if o.Features != nil { - toSerialize["features"] = o.Features + + if o.Name != nil { + toSerialize["name"] = o.Name } + if o.SecAuthProtection != nil { toSerialize["secAuthProtection"] = o.SecAuthProtection } - if o.CpuArchitecture != nil { - toSerialize["cpuArchitecture"] = o.CpuArchitecture + + if o.Version != nil { + toSerialize["version"] = o.Version } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenters.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenters.go index 28911601f..43c00c3c7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenters.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenters.go @@ -16,19 +16,19 @@ import ( // Datacenters struct for Datacenters type Datacenters struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Datacenter `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewDatacenters instantiates a new Datacenters object @@ -49,114 +49,114 @@ func NewDatacentersWithDefaults() *Datacenters { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Datacenters) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *Datacenters) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenters) GetIdOk() (*string, bool) { +func (o *Datacenters) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *Datacenters) SetId(v string) { +// SetLinks sets field value +func (o *Datacenters) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *Datacenters) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *Datacenters) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Datacenters) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Datacenters) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenters) GetTypeOk() (*Type, bool) { +func (o *Datacenters) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Datacenters) SetType(v Type) { +// SetHref sets field value +func (o *Datacenters) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Datacenters) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Datacenters) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Datacenters) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Datacenters) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenters) GetHrefOk() (*string, bool) { +func (o *Datacenters) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Datacenters) SetHref(v string) { +// SetId sets field value +func (o *Datacenters) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Datacenters) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *Datacenters) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *Datacenters) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Datacenter will be returned +// If the value is explicit nil, nil is returned func (o *Datacenters) GetItems() *[]Datacenter { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *Datacenters) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Datacenters) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *Datacenters) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenters) GetOffsetOk() (*float32, bool) { +func (o *Datacenters) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *Datacenters) SetOffset(v float32) { +// SetLimit sets field value +func (o *Datacenters) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *Datacenters) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *Datacenters) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Datacenters) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *Datacenters) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenters) GetLimitOk() (*float32, bool) { +func (o *Datacenters) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *Datacenters) SetLimit(v float32) { +// SetOffset sets field value +func (o *Datacenters) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *Datacenters) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. 
+func (o *Datacenters) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *Datacenters) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Datacenters) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Datacenters) GetLinksOk() (*PaginationLinks, bool) { +func (o *Datacenters) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *Datacenters) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *Datacenters) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *Datacenters) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *Datacenters) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *Datacenters) HasLinks() bool { func (o Datacenters) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_error.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_error.go index a383952b7..d5e52dd39 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_error.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_error.go @@ -40,7 +40,7 @@ func NewErrorWithDefaults() *Error { } // GetHttpStatus returns the HttpStatus field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *Error) GetHttpStatus() *int32 { if o == nil { return nil @@ -78,7 +78,7 @@ func (o *Error) HasHttpStatus() bool { } // GetMessages returns the Messages field value -// If the value is explicit nil, the zero value for []ErrorMessage will be returned +// If the value is explicit nil, nil is returned func (o *Error) GetMessages() *[]ErrorMessage { if o == nil { return nil @@ -120,9 +120,11 @@ func (o Error) MarshalJSON() ([]byte, error) { if o.HttpStatus != nil { toSerialize["httpStatus"] = o.HttpStatus } + if o.Messages != nil { toSerialize["messages"] = o.Messages } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_error_message.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_error_message.go index f3044d977..d567be376 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_error_message.go 
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_error_message.go @@ -41,7 +41,7 @@ func NewErrorMessageWithDefaults() *ErrorMessage { } // GetErrorCode returns the ErrorCode field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ErrorMessage) GetErrorCode() *string { if o == nil { return nil @@ -79,7 +79,7 @@ func (o *ErrorMessage) HasErrorCode() bool { } // GetMessage returns the Message field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ErrorMessage) GetMessage() *string { if o == nil { return nil @@ -121,9 +121,11 @@ func (o ErrorMessage) MarshalJSON() ([]byte, error) { if o.ErrorCode != nil { toSerialize["errorCode"] = o.ErrorCode } + if o.Message != nil { toSerialize["message"] = o.Message } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewall_rule.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewall_rule.go index 6b472e4e9..88f0f5ead 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewall_rule.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewall_rule.go @@ -16,14 +16,14 @@ import ( // FirewallRule struct for FirewallRule type FirewallRule struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *FirewallruleProperties `json:"properties"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewFirewallRule instantiates a new FirewallRule object @@ -46,190 +46,190 @@ func NewFirewallRuleWithDefaults() *FirewallRule { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallRule) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *FirewallRule) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRule) GetIdOk() (*string, bool) { +func (o *FirewallRule) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *FirewallRule) SetId(v string) { +// SetHref sets field value +func (o *FirewallRule) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *FirewallRule) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *FirewallRule) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *FirewallRule) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *FirewallRule) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRule) GetTypeOk() (*Type, bool) { +func (o *FirewallRule) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *FirewallRule) SetType(v Type) { +// SetId sets field value +func (o *FirewallRule) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *FirewallRule) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *FirewallRule) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallRule) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *FirewallRule) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRule) GetHrefOk() (*string, bool) { +func (o *FirewallRule) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *FirewallRule) SetHref(v string) { +// SetMetadata sets field value +func (o *FirewallRule) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *FirewallRule) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *FirewallRule) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *FirewallRule) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *FirewallRule) GetProperties() *FirewallruleProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRule) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *FirewallRule) GetPropertiesOk() (*FirewallruleProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *FirewallRule) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *FirewallRule) SetProperties(v FirewallruleProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *FirewallRule) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *FirewallRule) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for FirewallruleProperties will be returned -func (o *FirewallRule) GetProperties() *FirewallruleProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *FirewallRule) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRule) GetPropertiesOk() (*FirewallruleProperties, bool) { +func (o *FirewallRule) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *FirewallRule) SetProperties(v FirewallruleProperties) { +// SetType sets field value +func (o *FirewallRule) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *FirewallRule) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *FirewallRule) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *FirewallRule) HasProperties() bool { func (o FirewallRule) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewall_rules.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewall_rules.go index f3b572931..5701dda12 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewall_rules.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewall_rules.go @@ -16,19 +16,19 @@ import ( // FirewallRules struct for FirewallRules type FirewallRules struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). 
Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]FirewallRule `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewFirewallRules instantiates a new FirewallRules object @@ -49,114 +49,114 @@ func NewFirewallRulesWithDefaults() *FirewallRules { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallRules) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *FirewallRules) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRules) GetIdOk() (*string, bool) { +func (o *FirewallRules) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *FirewallRules) SetId(v string) { +// SetLinks sets field value +func (o *FirewallRules) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *FirewallRules) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *FirewallRules) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *FirewallRules) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *FirewallRules) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRules) GetTypeOk() (*Type, bool) { +func (o *FirewallRules) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *FirewallRules) SetType(v Type) { +// SetHref sets field value +func (o *FirewallRules) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *FirewallRules) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *FirewallRules) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallRules) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *FirewallRules) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRules) GetHrefOk() (*string, bool) { +func (o *FirewallRules) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *FirewallRules) SetHref(v string) { +// SetId sets field value +func (o *FirewallRules) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *FirewallRules) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *FirewallRules) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *FirewallRules) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []FirewallRule will be returned +// If the value is explicit nil, nil is returned func (o *FirewallRules) GetItems() *[]FirewallRule { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *FirewallRules) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *FirewallRules) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *FirewallRules) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRules) GetOffsetOk() (*float32, bool) { +func (o *FirewallRules) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *FirewallRules) SetOffset(v float32) { +// SetLimit sets field value +func (o *FirewallRules) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *FirewallRules) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. 
+func (o *FirewallRules) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *FirewallRules) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *FirewallRules) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRules) GetLimitOk() (*float32, bool) { +func (o *FirewallRules) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *FirewallRules) SetLimit(v float32) { +// SetOffset sets field value +func (o *FirewallRules) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *FirewallRules) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *FirewallRules) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *FirewallRules) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *FirewallRules) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallRules) GetLinksOk() (*PaginationLinks, bool) { +func (o *FirewallRules) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *FirewallRules) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *FirewallRules) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *FirewallRules) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. 
+func (o *FirewallRules) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *FirewallRules) HasLinks() bool { func (o FirewallRules) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewallrule_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewallrule_properties.go index 640cd23fb..0e5de49b3 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewallrule_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewallrule_properties.go @@ -16,26 +16,32 @@ import ( // FirewallruleProperties struct for FirewallruleProperties type FirewallruleProperties struct { + // Defines the allowed code (from 0 to 254) if protocol ICMP or ICMPv6 is chosen. Value null allows all codes. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilint32` can be used, or the setter `SetIcmpCodeNil` + IcmpCode *int32 `json:"icmpCode,omitempty"` + // Defines the allowed type (from 0 to 254) if the protocol ICMP or ICMPv6 is chosen. Value null allows all types. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilint32` can be used, or the setter `SetIcmpTypeNil` + IcmpType *int32 `json:"icmpType,omitempty"` + // The IP version for this rule. If sourceIp or targetIp are specified, you can omit this value - the IP version will then be deduced from the IP address(es) used; if you specify it anyway, it must match the specified IP address(es). If neither sourceIp nor targetIp are specified, this rule allows traffic only for the specified IP version. If neither sourceIp, targetIp nor ipVersion are specified, this rule will only allow IPv4 traffic. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetIpVersionNil` + IpVersion *string `json:"ipVersion,omitempty"` // The name of the resource. Name *string `json:"name,omitempty"` + // Defines the end range of the allowed port (from 1 to 65534) if the protocol TCP or UDP is chosen. Leave portRangeStart and portRangeEnd null to allow all ports. + PortRangeEnd *int32 `json:"portRangeEnd,omitempty"` + // Defines the start range of the allowed port (from 1 to 65534) if protocol TCP or UDP is chosen. Leave portRangeStart and portRangeEnd value null to allow all ports. + PortRangeStart *int32 `json:"portRangeStart,omitempty"` // The protocol for the rule. Property cannot be modified after it is created (disallowed in update requests). Protocol *string `json:"protocol"` - // Only traffic originating from the respective MAC address is allowed. Valid format: aa:bb:cc:dd:ee:ff. Value null allows traffic from any MAC address. - SourceMac *string `json:"sourceMac,omitempty"` - // The IP version for this rule. 
If sourceIp or targetIp are specified, you can omit this value - the IP version will then be deduced from the IP address(es) used; if you specify it anyway, it must match the specified IP address(es). If neither sourceIp nor targetIp are specified, this rule allows traffic only for the specified IP version. If neither sourceIp, targetIp nor ipVersion are specified, this rule will only allow IPv4 traffic. - IpVersion *string `json:"ipVersion,omitempty"` // Only traffic originating from the respective IP address (or CIDR block) is allowed. Value null allows traffic from any IP address (according to the selected ipVersion). + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetSourceIpNil` SourceIp *string `json:"sourceIp,omitempty"` + // Only traffic originating from the respective MAC address is allowed. Valid format: aa:bb:cc:dd:ee:ff. Value null allows traffic from any MAC address. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetSourceMacNil` + SourceMac *string `json:"sourceMac,omitempty"` // If the target NIC has multiple IP addresses, only the traffic directed to the respective IP address (or CIDR block) of the NIC is allowed. Value null allows traffic to any target IP address (according to the selected ipVersion). + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetTargetIpNil` TargetIp *string `json:"targetIp,omitempty"` - // Defines the allowed code (from 0 to 254) if protocol ICMP or ICMPv6 is chosen. Value null allows all codes. - IcmpCode *int32 `json:"icmpCode,omitempty"` - // Defines the allowed type (from 0 to 254) if the protocol ICMP or ICMPv6 is chosen. Value null allows all types. - IcmpType *int32 `json:"icmpType,omitempty"` - // Defines the start range of the allowed port (from 1 to 65534) if protocol TCP or UDP is chosen. Leave portRangeStart and portRangeEnd value null to allow all ports. - PortRangeStart *int32 `json:"portRangeStart,omitempty"` - // Defines the end range of the allowed port (from 1 to 65534) if the protocol TCP or UDP is chosen. Leave portRangeStart and portRangeEnd null to allow all ports. - PortRangeEnd *int32 `json:"portRangeEnd,omitempty"` // The type of the firewall rule. If not specified, the default INGRESS value is used. Type *string `json:"type,omitempty"` } @@ -60,380 +66,410 @@ func NewFirewallrulePropertiesWithDefaults() *FirewallruleProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallruleProperties) GetName() *string { +// GetIcmpCode returns the IcmpCode field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetIcmpCode() *int32 { if o == nil { return nil } - return o.Name + return o.IcmpCode } -// GetNameOk returns a tuple with the Name field value +// GetIcmpCodeOk returns a tuple with the IcmpCode field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetNameOk() (*string, bool) { +func (o *FirewallruleProperties) GetIcmpCodeOk() (*int32, bool) { if o == nil { return nil, false } - return o.Name, true + return o.IcmpCode, true } -// SetName sets field value -func (o *FirewallruleProperties) SetName(v string) { +// SetIcmpCode sets field value +func (o *FirewallruleProperties) SetIcmpCode(v int32) { - o.Name = &v + o.IcmpCode = &v } -// HasName returns a boolean if a field has been set. -func (o *FirewallruleProperties) HasName() bool { - if o != nil && o.Name != nil { +// sets IcmpCode to the explicit address that will be encoded as nil when marshaled +func (o *FirewallruleProperties) SetIcmpCodeNil() { + o.IcmpCode = &Nilint32 +} + +// HasIcmpCode returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasIcmpCode() bool { + if o != nil && o.IcmpCode != nil { return true } return false } -// GetProtocol returns the Protocol field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallruleProperties) GetProtocol() *string { +// GetIcmpType returns the IcmpType field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetIcmpType() *int32 { if o == nil { return nil } - return o.Protocol + return o.IcmpType } -// GetProtocolOk returns a tuple with the Protocol field value +// GetIcmpTypeOk returns a tuple with the IcmpType field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetProtocolOk() (*string, bool) { +func (o *FirewallruleProperties) GetIcmpTypeOk() (*int32, bool) { if o == nil { return nil, false } - return o.Protocol, true + return o.IcmpType, true } -// SetProtocol sets field value -func (o *FirewallruleProperties) SetProtocol(v string) { +// SetIcmpType sets field value +func (o *FirewallruleProperties) SetIcmpType(v int32) { - o.Protocol = &v + o.IcmpType = &v } -// HasProtocol returns a boolean if a field has been set. -func (o *FirewallruleProperties) HasProtocol() bool { - if o != nil && o.Protocol != nil { +// sets IcmpType to the explicit address that will be encoded as nil when marshaled +func (o *FirewallruleProperties) SetIcmpTypeNil() { + o.IcmpType = &Nilint32 +} + +// HasIcmpType returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasIcmpType() bool { + if o != nil && o.IcmpType != nil { return true } return false } -// GetSourceMac returns the SourceMac field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallruleProperties) GetSourceMac() *string { +// GetIpVersion returns the IpVersion field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetIpVersion() *string { if o == nil { return nil } - return o.SourceMac + return o.IpVersion } -// GetSourceMacOk returns a tuple with the SourceMac field value +// GetIpVersionOk returns a tuple with the IpVersion field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetSourceMacOk() (*string, bool) { +func (o *FirewallruleProperties) GetIpVersionOk() (*string, bool) { if o == nil { return nil, false } - return o.SourceMac, true + return o.IpVersion, true } -// SetSourceMac sets field value -func (o *FirewallruleProperties) SetSourceMac(v string) { +// SetIpVersion sets field value +func (o *FirewallruleProperties) SetIpVersion(v string) { - o.SourceMac = &v + o.IpVersion = &v } -// HasSourceMac returns a boolean if a field has been set. -func (o *FirewallruleProperties) HasSourceMac() bool { - if o != nil && o.SourceMac != nil { +// sets IpVersion to the explicit address that will be encoded as nil when marshaled +func (o *FirewallruleProperties) SetIpVersionNil() { + o.IpVersion = &Nilstring +} + +// HasIpVersion returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasIpVersion() bool { + if o != nil && o.IpVersion != nil { return true } return false } -// GetIpVersion returns the IpVersion field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallruleProperties) GetIpVersion() *string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetName() *string { if o == nil { return nil } - return o.IpVersion + return o.Name } -// GetIpVersionOk returns a tuple with the IpVersion field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetIpVersionOk() (*string, bool) { +func (o *FirewallruleProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.IpVersion, true + return o.Name, true } -// SetIpVersion sets field value -func (o *FirewallruleProperties) SetIpVersion(v string) { +// SetName sets field value +func (o *FirewallruleProperties) SetName(v string) { - o.IpVersion = &v + o.Name = &v } -// HasIpVersion returns a boolean if a field has been set. -func (o *FirewallruleProperties) HasIpVersion() bool { - if o != nil && o.IpVersion != nil { +// HasName returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetSourceIp returns the SourceIp field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallruleProperties) GetSourceIp() *string { +// GetPortRangeEnd returns the PortRangeEnd field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetPortRangeEnd() *int32 { if o == nil { return nil } - return o.SourceIp + return o.PortRangeEnd } -// GetSourceIpOk returns a tuple with the SourceIp field value +// GetPortRangeEndOk returns a tuple with the PortRangeEnd field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetSourceIpOk() (*string, bool) { +func (o *FirewallruleProperties) GetPortRangeEndOk() (*int32, bool) { if o == nil { return nil, false } - return o.SourceIp, true + return o.PortRangeEnd, true } -// SetSourceIp sets field value -func (o *FirewallruleProperties) SetSourceIp(v string) { +// SetPortRangeEnd sets field value +func (o *FirewallruleProperties) SetPortRangeEnd(v int32) { - o.SourceIp = &v + o.PortRangeEnd = &v } -// HasSourceIp returns a boolean if a field has been set. -func (o *FirewallruleProperties) HasSourceIp() bool { - if o != nil && o.SourceIp != nil { +// HasPortRangeEnd returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasPortRangeEnd() bool { + if o != nil && o.PortRangeEnd != nil { return true } return false } -// GetTargetIp returns the TargetIp field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FirewallruleProperties) GetTargetIp() *string { +// GetPortRangeStart returns the PortRangeStart field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetPortRangeStart() *int32 { if o == nil { return nil } - return o.TargetIp + return o.PortRangeStart } -// GetTargetIpOk returns a tuple with the TargetIp field value +// GetPortRangeStartOk returns a tuple with the PortRangeStart field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetTargetIpOk() (*string, bool) { +func (o *FirewallruleProperties) GetPortRangeStartOk() (*int32, bool) { if o == nil { return nil, false } - return o.TargetIp, true + return o.PortRangeStart, true } -// SetTargetIp sets field value -func (o *FirewallruleProperties) SetTargetIp(v string) { +// SetPortRangeStart sets field value +func (o *FirewallruleProperties) SetPortRangeStart(v int32) { - o.TargetIp = &v + o.PortRangeStart = &v } -// HasTargetIp returns a boolean if a field has been set. -func (o *FirewallruleProperties) HasTargetIp() bool { - if o != nil && o.TargetIp != nil { +// HasPortRangeStart returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasPortRangeStart() bool { + if o != nil && o.PortRangeStart != nil { return true } return false } -// GetIcmpCode returns the IcmpCode field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *FirewallruleProperties) GetIcmpCode() *int32 { +// GetProtocol returns the Protocol field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetProtocol() *string { if o == nil { return nil } - return o.IcmpCode + return o.Protocol } -// GetIcmpCodeOk returns a tuple with the IcmpCode field value +// GetProtocolOk returns a tuple with the Protocol field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetIcmpCodeOk() (*int32, bool) { +func (o *FirewallruleProperties) GetProtocolOk() (*string, bool) { if o == nil { return nil, false } - return o.IcmpCode, true + return o.Protocol, true } -// SetIcmpCode sets field value -func (o *FirewallruleProperties) SetIcmpCode(v int32) { +// SetProtocol sets field value +func (o *FirewallruleProperties) SetProtocol(v string) { - o.IcmpCode = &v + o.Protocol = &v } -// HasIcmpCode returns a boolean if a field has been set. 
-func (o *FirewallruleProperties) HasIcmpCode() bool { - if o != nil && o.IcmpCode != nil { +// HasProtocol returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasProtocol() bool { + if o != nil && o.Protocol != nil { return true } return false } -// GetIcmpType returns the IcmpType field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *FirewallruleProperties) GetIcmpType() *int32 { +// GetSourceIp returns the SourceIp field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetSourceIp() *string { if o == nil { return nil } - return o.IcmpType + return o.SourceIp } -// GetIcmpTypeOk returns a tuple with the IcmpType field value +// GetSourceIpOk returns a tuple with the SourceIp field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetIcmpTypeOk() (*int32, bool) { +func (o *FirewallruleProperties) GetSourceIpOk() (*string, bool) { if o == nil { return nil, false } - return o.IcmpType, true + return o.SourceIp, true } -// SetIcmpType sets field value -func (o *FirewallruleProperties) SetIcmpType(v int32) { +// SetSourceIp sets field value +func (o *FirewallruleProperties) SetSourceIp(v string) { - o.IcmpType = &v + o.SourceIp = &v } -// HasIcmpType returns a boolean if a field has been set. -func (o *FirewallruleProperties) HasIcmpType() bool { - if o != nil && o.IcmpType != nil { +// sets SourceIp to the explicit address that will be encoded as nil when marshaled +func (o *FirewallruleProperties) SetSourceIpNil() { + o.SourceIp = &Nilstring +} + +// HasSourceIp returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasSourceIp() bool { + if o != nil && o.SourceIp != nil { return true } return false } -// GetPortRangeStart returns the PortRangeStart field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *FirewallruleProperties) GetPortRangeStart() *int32 { +// GetSourceMac returns the SourceMac field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetSourceMac() *string { if o == nil { return nil } - return o.PortRangeStart + return o.SourceMac } -// GetPortRangeStartOk returns a tuple with the PortRangeStart field value +// GetSourceMacOk returns a tuple with the SourceMac field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetPortRangeStartOk() (*int32, bool) { +func (o *FirewallruleProperties) GetSourceMacOk() (*string, bool) { if o == nil { return nil, false } - return o.PortRangeStart, true + return o.SourceMac, true } -// SetPortRangeStart sets field value -func (o *FirewallruleProperties) SetPortRangeStart(v int32) { +// SetSourceMac sets field value +func (o *FirewallruleProperties) SetSourceMac(v string) { - o.PortRangeStart = &v + o.SourceMac = &v } -// HasPortRangeStart returns a boolean if a field has been set. -func (o *FirewallruleProperties) HasPortRangeStart() bool { - if o != nil && o.PortRangeStart != nil { +// sets SourceMac to the explicit address that will be encoded as nil when marshaled +func (o *FirewallruleProperties) SetSourceMacNil() { + o.SourceMac = &Nilstring +} + +// HasSourceMac returns a boolean if a field has been set. 
+func (o *FirewallruleProperties) HasSourceMac() bool { + if o != nil && o.SourceMac != nil { return true } return false } -// GetPortRangeEnd returns the PortRangeEnd field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *FirewallruleProperties) GetPortRangeEnd() *int32 { +// GetTargetIp returns the TargetIp field value +// If the value is explicit nil, nil is returned +func (o *FirewallruleProperties) GetTargetIp() *string { if o == nil { return nil } - return o.PortRangeEnd + return o.TargetIp } -// GetPortRangeEndOk returns a tuple with the PortRangeEnd field value +// GetTargetIpOk returns a tuple with the TargetIp field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FirewallruleProperties) GetPortRangeEndOk() (*int32, bool) { +func (o *FirewallruleProperties) GetTargetIpOk() (*string, bool) { if o == nil { return nil, false } - return o.PortRangeEnd, true + return o.TargetIp, true } -// SetPortRangeEnd sets field value -func (o *FirewallruleProperties) SetPortRangeEnd(v int32) { +// SetTargetIp sets field value +func (o *FirewallruleProperties) SetTargetIp(v string) { - o.PortRangeEnd = &v + o.TargetIp = &v } -// HasPortRangeEnd returns a boolean if a field has been set. -func (o *FirewallruleProperties) HasPortRangeEnd() bool { - if o != nil && o.PortRangeEnd != nil { +// sets TargetIp to the explicit address that will be encoded as nil when marshaled +func (o *FirewallruleProperties) SetTargetIpNil() { + o.TargetIp = &Nilstring +} + +// HasTargetIp returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasTargetIp() bool { + if o != nil && o.TargetIp != nil { return true } @@ -441,7 +477,7 @@ func (o *FirewallruleProperties) HasPortRangeEnd() bool { } // GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *FirewallruleProperties) GetType() *string { if o == nil { return nil @@ -480,29 +516,61 @@ func (o *FirewallruleProperties) HasType() bool { func (o FirewallruleProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + + if o.IcmpCode == &Nilint32 { + toSerialize["icmpCode"] = nil + } else if o.IcmpCode != nil { + toSerialize["icmpCode"] = o.IcmpCode + } + + if o.IcmpType == &Nilint32 { + toSerialize["icmpType"] = nil + } else if o.IcmpType != nil { + toSerialize["icmpType"] = o.IcmpType + } + + if o.IpVersion == &Nilstring { + toSerialize["ipVersion"] = nil + } else if o.IpVersion != nil { + toSerialize["ipVersion"] = o.IpVersion + } if o.Name != nil { toSerialize["name"] = o.Name } + + if o.PortRangeEnd != nil { + toSerialize["portRangeEnd"] = o.PortRangeEnd + } + + if o.PortRangeStart != nil { + toSerialize["portRangeStart"] = o.PortRangeStart + } + if o.Protocol != nil { toSerialize["protocol"] = o.Protocol } - toSerialize["sourceMac"] = o.SourceMac - if o.IpVersion != nil { - toSerialize["ipVersion"] = o.IpVersion + + if o.SourceIp == &Nilstring { + toSerialize["sourceIp"] = nil + } else if o.SourceIp != nil { + toSerialize["sourceIp"] = o.SourceIp } - toSerialize["sourceIp"] = o.SourceIp - toSerialize["targetIp"] = o.TargetIp - toSerialize["icmpCode"] = o.IcmpCode - toSerialize["icmpType"] = o.IcmpType - if o.PortRangeStart != nil { - toSerialize["portRangeStart"] = o.PortRangeStart + + if o.SourceMac == &Nilstring { + toSerialize["sourceMac"] = nil + } else if o.SourceMac 
!= nil { + toSerialize["sourceMac"] = o.SourceMac } - if o.PortRangeEnd != nil { - toSerialize["portRangeEnd"] = o.PortRangeEnd + + if o.TargetIp == &Nilstring { + toSerialize["targetIp"] = nil + } else if o.TargetIp != nil { + toSerialize["targetIp"] = o.TargetIp } if o.Type != nil { toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log.go index e8d74517f..6e4412408 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log.go @@ -16,14 +16,14 @@ import ( // FlowLog struct for FlowLog type FlowLog struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *FlowLogProperties `json:"properties"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewFlowLog instantiates a new FlowLog object @@ -46,190 +46,190 @@ func NewFlowLogWithDefaults() *FlowLog { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FlowLog) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *FlowLog) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLog) GetIdOk() (*string, bool) { +func (o *FlowLog) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *FlowLog) SetId(v string) { +// SetHref sets field value +func (o *FlowLog) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *FlowLog) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *FlowLog) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *FlowLog) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *FlowLog) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLog) GetTypeOk() (*Type, bool) { +func (o *FlowLog) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *FlowLog) SetType(v Type) { +// SetId sets field value +func (o *FlowLog) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. 
-func (o *FlowLog) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *FlowLog) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FlowLog) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *FlowLog) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLog) GetHrefOk() (*string, bool) { +func (o *FlowLog) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *FlowLog) SetHref(v string) { +// SetMetadata sets field value +func (o *FlowLog) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *FlowLog) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *FlowLog) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *FlowLog) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *FlowLog) GetProperties() *FlowLogProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLog) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *FlowLog) GetPropertiesOk() (*FlowLogProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *FlowLog) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *FlowLog) SetProperties(v FlowLogProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *FlowLog) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. 
+func (o *FlowLog) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for FlowLogProperties will be returned -func (o *FlowLog) GetProperties() *FlowLogProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *FlowLog) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLog) GetPropertiesOk() (*FlowLogProperties, bool) { +func (o *FlowLog) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *FlowLog) SetProperties(v FlowLogProperties) { +// SetType sets field value +func (o *FlowLog) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *FlowLog) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *FlowLog) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *FlowLog) HasProperties() bool { func (o FlowLog) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_properties.go index dca17c214..0f2a3b202 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_properties.go @@ -16,27 +16,27 @@ import ( // FlowLogProperties struct for FlowLogProperties type FlowLogProperties struct { - // The resource name. - Name *string `json:"name"` // Specifies the traffic action pattern. Action *string `json:"action"` - // Specifies the traffic direction pattern. - Direction *string `json:"direction"` // The S3 bucket name of an existing IONOS Cloud S3 bucket. Bucket *string `json:"bucket"` + // Specifies the traffic direction pattern. + Direction *string `json:"direction"` + // The resource name. 
+ Name *string `json:"name"` } // NewFlowLogProperties instantiates a new FlowLogProperties object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewFlowLogProperties(name string, action string, direction string, bucket string) *FlowLogProperties { +func NewFlowLogProperties(action string, bucket string, direction string, name string) *FlowLogProperties { this := FlowLogProperties{} - this.Name = &name this.Action = &action - this.Direction = &direction this.Bucket = &bucket + this.Direction = &direction + this.Name = &name return &this } @@ -49,76 +49,76 @@ func NewFlowLogPropertiesWithDefaults() *FlowLogProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FlowLogProperties) GetName() *string { +// GetAction returns the Action field value +// If the value is explicit nil, nil is returned +func (o *FlowLogProperties) GetAction() *string { if o == nil { return nil } - return o.Name + return o.Action } -// GetNameOk returns a tuple with the Name field value +// GetActionOk returns a tuple with the Action field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogProperties) GetNameOk() (*string, bool) { +func (o *FlowLogProperties) GetActionOk() (*string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Action, true } -// SetName sets field value -func (o *FlowLogProperties) SetName(v string) { +// SetAction sets field value +func (o *FlowLogProperties) SetAction(v string) { - o.Name = &v + o.Action = &v } -// HasName returns a boolean if a field has been set. -func (o *FlowLogProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasAction returns a boolean if a field has been set. +func (o *FlowLogProperties) HasAction() bool { + if o != nil && o.Action != nil { return true } return false } -// GetAction returns the Action field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FlowLogProperties) GetAction() *string { +// GetBucket returns the Bucket field value +// If the value is explicit nil, nil is returned +func (o *FlowLogProperties) GetBucket() *string { if o == nil { return nil } - return o.Action + return o.Bucket } -// GetActionOk returns a tuple with the Action field value +// GetBucketOk returns a tuple with the Bucket field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogProperties) GetActionOk() (*string, bool) { +func (o *FlowLogProperties) GetBucketOk() (*string, bool) { if o == nil { return nil, false } - return o.Action, true + return o.Bucket, true } -// SetAction sets field value -func (o *FlowLogProperties) SetAction(v string) { +// SetBucket sets field value +func (o *FlowLogProperties) SetBucket(v string) { - o.Action = &v + o.Bucket = &v } -// HasAction returns a boolean if a field has been set. -func (o *FlowLogProperties) HasAction() bool { - if o != nil && o.Action != nil { +// HasBucket returns a boolean if a field has been set. 
+func (o *FlowLogProperties) HasBucket() bool { + if o != nil && o.Bucket != nil { return true } @@ -126,7 +126,7 @@ func (o *FlowLogProperties) HasAction() bool { } // GetDirection returns the Direction field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *FlowLogProperties) GetDirection() *string { if o == nil { return nil @@ -163,38 +163,38 @@ func (o *FlowLogProperties) HasDirection() bool { return false } -// GetBucket returns the Bucket field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FlowLogProperties) GetBucket() *string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *FlowLogProperties) GetName() *string { if o == nil { return nil } - return o.Bucket + return o.Name } -// GetBucketOk returns a tuple with the Bucket field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogProperties) GetBucketOk() (*string, bool) { +func (o *FlowLogProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Bucket, true + return o.Name, true } -// SetBucket sets field value -func (o *FlowLogProperties) SetBucket(v string) { +// SetName sets field value +func (o *FlowLogProperties) SetName(v string) { - o.Bucket = &v + o.Name = &v } -// HasBucket returns a boolean if a field has been set. -func (o *FlowLogProperties) HasBucket() bool { - if o != nil && o.Bucket != nil { +// HasName returns a boolean if a field has been set. +func (o *FlowLogProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -203,18 +203,22 @@ func (o *FlowLogProperties) HasBucket() bool { func (o FlowLogProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } if o.Action != nil { toSerialize["action"] = o.Action } + + if o.Bucket != nil { + toSerialize["bucket"] = o.Bucket + } + if o.Direction != nil { toSerialize["direction"] = o.Direction } - if o.Bucket != nil { - toSerialize["bucket"] = o.Bucket + + if o.Name != nil { + toSerialize["name"] = o.Name } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_put.go index 8f9b7f777..7fe84d71f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_put.go @@ -16,13 +16,13 @@ import ( // FlowLogPut struct for FlowLogPut type FlowLogPut struct { + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *FlowLogProperties `json:"properties"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). 
- Href *string `json:"href,omitempty"` - Properties *FlowLogProperties `json:"properties"` } // NewFlowLogPut instantiates a new FlowLogPut object @@ -45,152 +45,152 @@ func NewFlowLogPutWithDefaults() *FlowLogPut { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FlowLogPut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *FlowLogPut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogPut) GetIdOk() (*string, bool) { +func (o *FlowLogPut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *FlowLogPut) SetId(v string) { +// SetHref sets field value +func (o *FlowLogPut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *FlowLogPut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *FlowLogPut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *FlowLogPut) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *FlowLogPut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogPut) GetTypeOk() (*Type, bool) { +func (o *FlowLogPut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *FlowLogPut) SetType(v Type) { +// SetId sets field value +func (o *FlowLogPut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *FlowLogPut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *FlowLogPut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FlowLogPut) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *FlowLogPut) GetProperties() *FlowLogProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogPut) GetHrefOk() (*string, bool) { +func (o *FlowLogPut) GetPropertiesOk() (*FlowLogProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *FlowLogPut) SetHref(v string) { +// SetProperties sets field value +func (o *FlowLogPut) SetProperties(v FlowLogProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *FlowLogPut) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. +func (o *FlowLogPut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for FlowLogProperties will be returned -func (o *FlowLogPut) GetProperties() *FlowLogProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *FlowLogPut) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogPut) GetPropertiesOk() (*FlowLogProperties, bool) { +func (o *FlowLogPut) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *FlowLogPut) SetProperties(v FlowLogProperties) { +// SetType sets field value +func (o *FlowLogPut) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *FlowLogPut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *FlowLogPut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *FlowLogPut) HasProperties() bool { func (o FlowLogPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_logs.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_logs.go index 48f070e31..653d925ab 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_logs.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_logs.go @@ -16,19 +16,19 @@ import ( // FlowLogs struct for FlowLogs type FlowLogs struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]FlowLog `json:"items,omitempty"` + // The limit (if specified in the request). 
+ Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewFlowLogs instantiates a new FlowLogs object @@ -49,114 +49,114 @@ func NewFlowLogsWithDefaults() *FlowLogs { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FlowLogs) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *FlowLogs) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogs) GetIdOk() (*string, bool) { +func (o *FlowLogs) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *FlowLogs) SetId(v string) { +// SetLinks sets field value +func (o *FlowLogs) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *FlowLogs) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *FlowLogs) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *FlowLogs) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *FlowLogs) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogs) GetTypeOk() (*Type, bool) { +func (o *FlowLogs) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *FlowLogs) SetType(v Type) { +// SetHref sets field value +func (o *FlowLogs) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *FlowLogs) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *FlowLogs) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *FlowLogs) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *FlowLogs) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogs) GetHrefOk() (*string, bool) { +func (o *FlowLogs) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *FlowLogs) SetHref(v string) { +// SetId sets field value +func (o *FlowLogs) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *FlowLogs) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *FlowLogs) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *FlowLogs) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []FlowLog will be returned +// If the value is explicit nil, nil is returned func (o *FlowLogs) GetItems() *[]FlowLog { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *FlowLogs) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *FlowLogs) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *FlowLogs) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogs) GetOffsetOk() (*float32, bool) { +func (o *FlowLogs) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *FlowLogs) SetOffset(v float32) { +// SetLimit sets field value +func (o *FlowLogs) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *FlowLogs) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *FlowLogs) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *FlowLogs) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *FlowLogs) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogs) GetLimitOk() (*float32, bool) { +func (o *FlowLogs) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *FlowLogs) SetLimit(v float32) { +// SetOffset sets field value +func (o *FlowLogs) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *FlowLogs) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. 
+func (o *FlowLogs) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *FlowLogs) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *FlowLogs) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *FlowLogs) GetLinksOk() (*PaginationLinks, bool) { +func (o *FlowLogs) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *FlowLogs) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *FlowLogs) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *FlowLogs) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *FlowLogs) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *FlowLogs) HasLinks() bool { func (o FlowLogs) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group.go index a1e9b6781..1509dd460 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group.go @@ -16,14 +16,14 @@ import ( // Group struct for Group type Group struct { + Entities *GroupEntities `json:"entities,omitempty"` + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *GroupProperties `json:"properties"` // The type of the resource. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). 
- Href *string `json:"href,omitempty"` - Properties *GroupProperties `json:"properties"` - Entities *GroupEntities `json:"entities,omitempty"` } // NewGroup instantiates a new Group object @@ -46,114 +46,114 @@ func NewGroupWithDefaults() *Group { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Group) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *Group) GetEntities() *GroupEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Group) GetIdOk() (*string, bool) { +func (o *Group) GetEntitiesOk() (*GroupEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *Group) SetId(v string) { +// SetEntities sets field value +func (o *Group) SetEntities(v GroupEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *Group) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *Group) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Group) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Group) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Group) GetTypeOk() (*Type, bool) { +func (o *Group) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Group) SetType(v Type) { +// SetHref sets field value +func (o *Group) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Group) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Group) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Group) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Group) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Group) GetHrefOk() (*string, bool) { +func (o *Group) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Group) SetHref(v string) { +// SetId sets field value +func (o *Group) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Group) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Group) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -161,7 +161,7 @@ func (o *Group) HasHref() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for GroupProperties will be returned +// If the value is explicit nil, nil is returned func (o *Group) GetProperties() *GroupProperties { if o == nil { return nil @@ -198,38 +198,38 @@ func (o *Group) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for GroupEntities will be returned -func (o *Group) GetEntities() *GroupEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Group) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Group) GetEntitiesOk() (*GroupEntities, bool) { +func (o *Group) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *Group) SetEntities(v GroupEntities) { +// SetType sets field value +func (o *Group) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *Group) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Group) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *Group) HasEntities() bool { func (o Group) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_entities.go index 067e84cac..f9e914622 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_entities.go @@ -16,8 +16,8 @@ import ( // GroupEntities struct for GroupEntities type GroupEntities struct { - Users *GroupMembers `json:"users,omitempty"` Resources *ResourceGroups `json:"resources,omitempty"` + Users *GroupMembers `json:"users,omitempty"` } // NewGroupEntities instantiates a new GroupEntities object @@ -38,76 +38,76 @@ func NewGroupEntitiesWithDefaults() *GroupEntities { return &this } -// GetUsers returns the Users field value -// If the value is explicit nil, the zero value for GroupMembers will be returned -func (o *GroupEntities) GetUsers() *GroupMembers { +// GetResources returns the Resources field value +// If the value is explicit nil, nil is returned +func (o *GroupEntities) GetResources() *ResourceGroups { if o == nil { return nil } - return o.Users + return o.Resources } -// GetUsersOk returns a tuple with the Users field value +// GetResourcesOk returns a tuple with the Resources field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupEntities) GetUsersOk() (*GroupMembers, bool) { +func (o *GroupEntities) GetResourcesOk() (*ResourceGroups, bool) { if o == nil { return nil, false } - return o.Users, true + return o.Resources, true } -// SetUsers sets field value -func (o *GroupEntities) SetUsers(v GroupMembers) { +// SetResources sets field value +func (o *GroupEntities) SetResources(v ResourceGroups) { - o.Users = &v + o.Resources = &v } -// HasUsers returns a boolean if a field has been set. -func (o *GroupEntities) HasUsers() bool { - if o != nil && o.Users != nil { +// HasResources returns a boolean if a field has been set. +func (o *GroupEntities) HasResources() bool { + if o != nil && o.Resources != nil { return true } return false } -// GetResources returns the Resources field value -// If the value is explicit nil, the zero value for ResourceGroups will be returned -func (o *GroupEntities) GetResources() *ResourceGroups { +// GetUsers returns the Users field value +// If the value is explicit nil, nil is returned +func (o *GroupEntities) GetUsers() *GroupMembers { if o == nil { return nil } - return o.Resources + return o.Users } -// GetResourcesOk returns a tuple with the Resources field value +// GetUsersOk returns a tuple with the Users field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupEntities) GetResourcesOk() (*ResourceGroups, bool) { +func (o *GroupEntities) GetUsersOk() (*GroupMembers, bool) { if o == nil { return nil, false } - return o.Resources, true + return o.Users, true } -// SetResources sets field value -func (o *GroupEntities) SetResources(v ResourceGroups) { +// SetUsers sets field value +func (o *GroupEntities) SetUsers(v GroupMembers) { - o.Resources = &v + o.Users = &v } -// HasResources returns a boolean if a field has been set. -func (o *GroupEntities) HasResources() bool { - if o != nil && o.Resources != nil { +// HasUsers returns a boolean if a field has been set. +func (o *GroupEntities) HasUsers() bool { + if o != nil && o.Users != nil { return true } @@ -116,12 +116,14 @@ func (o *GroupEntities) HasResources() bool { func (o GroupEntities) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Users != nil { - toSerialize["users"] = o.Users - } if o.Resources != nil { toSerialize["resources"] = o.Resources } + + if o.Users != nil { + toSerialize["users"] = o.Users + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_members.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_members.go index c38d7c018..a4a750e78 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_members.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_members.go @@ -16,14 +16,14 @@ import ( // GroupMembers struct for GroupMembers type GroupMembers struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]User `json:"items,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewGroupMembers instantiates a new GroupMembers object @@ -44,152 +44,152 @@ func NewGroupMembersWithDefaults() *GroupMembers { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *GroupMembers) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *GroupMembers) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupMembers) GetIdOk() (*string, bool) { +func (o *GroupMembers) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *GroupMembers) SetId(v string) { +// SetHref sets field value +func (o *GroupMembers) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *GroupMembers) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *GroupMembers) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *GroupMembers) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *GroupMembers) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupMembers) GetTypeOk() (*Type, bool) { +func (o *GroupMembers) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *GroupMembers) SetType(v Type) { +// SetId sets field value +func (o *GroupMembers) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *GroupMembers) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *GroupMembers) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *GroupMembers) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *GroupMembers) GetItems() *[]User { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupMembers) GetHrefOk() (*string, bool) { +func (o *GroupMembers) GetItemsOk() (*[]User, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *GroupMembers) SetHref(v string) { +// SetItems sets field value +func (o *GroupMembers) SetItems(v []User) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *GroupMembers) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *GroupMembers) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []User will be returned -func (o *GroupMembers) GetItems() *[]User { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *GroupMembers) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupMembers) GetItemsOk() (*[]User, bool) { +func (o *GroupMembers) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *GroupMembers) SetItems(v []User) { +// SetType sets field value +func (o *GroupMembers) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *GroupMembers) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *GroupMembers) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *GroupMembers) HasItems() bool { func (o GroupMembers) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_properties.go index c4c55d9b1..d73cb668b 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_properties.go @@ -16,40 +16,40 @@ import ( // GroupProperties struct for GroupProperties type GroupProperties struct { - // The name of the resource. - Name *string `json:"name,omitempty"` - // Create data center privilege. - CreateDataCenter *bool `json:"createDataCenter,omitempty"` - // Create snapshot privilege. - CreateSnapshot *bool `json:"createSnapshot,omitempty"` - // Reserve IP block privilege. - ReserveIp *bool `json:"reserveIp,omitempty"` // Activity log access privilege. AccessActivityLog *bool `json:"accessActivityLog,omitempty"` - // Create pcc privilege. - CreatePcc *bool `json:"createPcc,omitempty"` - // S3 privilege. - S3Privilege *bool `json:"s3Privilege,omitempty"` + // Privilege for a group to access and manage certificates. + AccessAndManageCertificates *bool `json:"accessAndManageCertificates,omitempty"` + // Privilege for a group to access and manage dns records. + AccessAndManageDns *bool `json:"accessAndManageDns,omitempty"` + // Privilege for a group to access and manage monitoring related functionality (access metrics, CRUD on alarms, alarm-actions etc) using Monotoring-as-a-Service (MaaS). + AccessAndManageMonitoring *bool `json:"accessAndManageMonitoring,omitempty"` // Create backup unit privilege. CreateBackupUnit *bool `json:"createBackupUnit,omitempty"` + // Create data center privilege. + CreateDataCenter *bool `json:"createDataCenter,omitempty"` + // Create Flow Logs privilege. + CreateFlowLog *bool `json:"createFlowLog,omitempty"` // Create internet access privilege. CreateInternetAccess *bool `json:"createInternetAccess,omitempty"` // Create Kubernetes cluster privilege. CreateK8sCluster *bool `json:"createK8sCluster,omitempty"` - // Create Flow Logs privilege. - CreateFlowLog *bool `json:"createFlowLog,omitempty"` - // Privilege for a group to access and manage monitoring related functionality (access metrics, CRUD on alarms, alarm-actions etc) using Monotoring-as-a-Service (MaaS). 
- AccessAndManageMonitoring *bool `json:"accessAndManageMonitoring,omitempty"` - // Privilege for a group to access and manage certificates. - AccessAndManageCertificates *bool `json:"accessAndManageCertificates,omitempty"` + // Create pcc privilege. + CreatePcc *bool `json:"createPcc,omitempty"` + // Create snapshot privilege. + CreateSnapshot *bool `json:"createSnapshot,omitempty"` // Privilege for a group to manage DBaaS related functionality. ManageDBaaS *bool `json:"manageDBaaS,omitempty"` - // Privilege for a group to access and manage dns records. - AccessAndManageDns *bool `json:"accessAndManageDns,omitempty"` + // Privilege for a group to access and manage the Data Platform. + ManageDataplatform *bool `json:"manageDataplatform,omitempty"` // Privilege for group accessing container registry related functionality. ManageRegistry *bool `json:"manageRegistry,omitempty"` - // Privilege for a group to access and manage Data Platform. - ManageDataplatform *bool `json:"manageDataplatform,omitempty"` + // The name of the resource. + Name *string `json:"name,omitempty"` + // Reserve IP block privilege. + ReserveIp *bool `json:"reserveIp,omitempty"` + // S3 privilege. + S3Privilege *bool `json:"s3Privilege,omitempty"` } // NewGroupProperties instantiates a new GroupProperties object @@ -70,304 +70,266 @@ func NewGroupPropertiesWithDefaults() *GroupProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *GroupProperties) GetName() *string { - if o == nil { - return nil - } - - return o.Name - -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Name, true -} - -// SetName sets field value -func (o *GroupProperties) SetName(v string) { - - o.Name = &v - -} - -// HasName returns a boolean if a field has been set. -func (o *GroupProperties) HasName() bool { - if o != nil && o.Name != nil { - return true - } - - return false -} - -// GetCreateDataCenter returns the CreateDataCenter field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetCreateDataCenter() *bool { +// GetAccessActivityLog returns the AccessActivityLog field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetAccessActivityLog() *bool { if o == nil { return nil } - return o.CreateDataCenter + return o.AccessActivityLog } -// GetCreateDataCenterOk returns a tuple with the CreateDataCenter field value +// GetAccessActivityLogOk returns a tuple with the AccessActivityLog field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetCreateDataCenterOk() (*bool, bool) { +func (o *GroupProperties) GetAccessActivityLogOk() (*bool, bool) { if o == nil { return nil, false } - return o.CreateDataCenter, true + return o.AccessActivityLog, true } -// SetCreateDataCenter sets field value -func (o *GroupProperties) SetCreateDataCenter(v bool) { +// SetAccessActivityLog sets field value +func (o *GroupProperties) SetAccessActivityLog(v bool) { - o.CreateDataCenter = &v + o.AccessActivityLog = &v } -// HasCreateDataCenter returns a boolean if a field has been set. 
-func (o *GroupProperties) HasCreateDataCenter() bool { - if o != nil && o.CreateDataCenter != nil { +// HasAccessActivityLog returns a boolean if a field has been set. +func (o *GroupProperties) HasAccessActivityLog() bool { + if o != nil && o.AccessActivityLog != nil { return true } return false } -// GetCreateSnapshot returns the CreateSnapshot field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetCreateSnapshot() *bool { +// GetAccessAndManageCertificates returns the AccessAndManageCertificates field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetAccessAndManageCertificates() *bool { if o == nil { return nil } - return o.CreateSnapshot + return o.AccessAndManageCertificates } -// GetCreateSnapshotOk returns a tuple with the CreateSnapshot field value +// GetAccessAndManageCertificatesOk returns a tuple with the AccessAndManageCertificates field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetCreateSnapshotOk() (*bool, bool) { +func (o *GroupProperties) GetAccessAndManageCertificatesOk() (*bool, bool) { if o == nil { return nil, false } - return o.CreateSnapshot, true + return o.AccessAndManageCertificates, true } -// SetCreateSnapshot sets field value -func (o *GroupProperties) SetCreateSnapshot(v bool) { +// SetAccessAndManageCertificates sets field value +func (o *GroupProperties) SetAccessAndManageCertificates(v bool) { - o.CreateSnapshot = &v + o.AccessAndManageCertificates = &v } -// HasCreateSnapshot returns a boolean if a field has been set. -func (o *GroupProperties) HasCreateSnapshot() bool { - if o != nil && o.CreateSnapshot != nil { +// HasAccessAndManageCertificates returns a boolean if a field has been set. +func (o *GroupProperties) HasAccessAndManageCertificates() bool { + if o != nil && o.AccessAndManageCertificates != nil { return true } return false } -// GetReserveIp returns the ReserveIp field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetReserveIp() *bool { +// GetAccessAndManageDns returns the AccessAndManageDns field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetAccessAndManageDns() *bool { if o == nil { return nil } - return o.ReserveIp + return o.AccessAndManageDns } -// GetReserveIpOk returns a tuple with the ReserveIp field value +// GetAccessAndManageDnsOk returns a tuple with the AccessAndManageDns field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetReserveIpOk() (*bool, bool) { +func (o *GroupProperties) GetAccessAndManageDnsOk() (*bool, bool) { if o == nil { return nil, false } - return o.ReserveIp, true + return o.AccessAndManageDns, true } -// SetReserveIp sets field value -func (o *GroupProperties) SetReserveIp(v bool) { +// SetAccessAndManageDns sets field value +func (o *GroupProperties) SetAccessAndManageDns(v bool) { - o.ReserveIp = &v + o.AccessAndManageDns = &v } -// HasReserveIp returns a boolean if a field has been set. -func (o *GroupProperties) HasReserveIp() bool { - if o != nil && o.ReserveIp != nil { +// HasAccessAndManageDns returns a boolean if a field has been set. 
+func (o *GroupProperties) HasAccessAndManageDns() bool { + if o != nil && o.AccessAndManageDns != nil { return true } return false } -// GetAccessActivityLog returns the AccessActivityLog field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetAccessActivityLog() *bool { +// GetAccessAndManageMonitoring returns the AccessAndManageMonitoring field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetAccessAndManageMonitoring() *bool { if o == nil { return nil } - return o.AccessActivityLog + return o.AccessAndManageMonitoring } -// GetAccessActivityLogOk returns a tuple with the AccessActivityLog field value +// GetAccessAndManageMonitoringOk returns a tuple with the AccessAndManageMonitoring field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetAccessActivityLogOk() (*bool, bool) { +func (o *GroupProperties) GetAccessAndManageMonitoringOk() (*bool, bool) { if o == nil { return nil, false } - return o.AccessActivityLog, true + return o.AccessAndManageMonitoring, true } -// SetAccessActivityLog sets field value -func (o *GroupProperties) SetAccessActivityLog(v bool) { +// SetAccessAndManageMonitoring sets field value +func (o *GroupProperties) SetAccessAndManageMonitoring(v bool) { - o.AccessActivityLog = &v + o.AccessAndManageMonitoring = &v } -// HasAccessActivityLog returns a boolean if a field has been set. -func (o *GroupProperties) HasAccessActivityLog() bool { - if o != nil && o.AccessActivityLog != nil { +// HasAccessAndManageMonitoring returns a boolean if a field has been set. +func (o *GroupProperties) HasAccessAndManageMonitoring() bool { + if o != nil && o.AccessAndManageMonitoring != nil { return true } return false } -// GetCreatePcc returns the CreatePcc field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetCreatePcc() *bool { +// GetCreateBackupUnit returns the CreateBackupUnit field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetCreateBackupUnit() *bool { if o == nil { return nil } - return o.CreatePcc + return o.CreateBackupUnit } -// GetCreatePccOk returns a tuple with the CreatePcc field value +// GetCreateBackupUnitOk returns a tuple with the CreateBackupUnit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetCreatePccOk() (*bool, bool) { +func (o *GroupProperties) GetCreateBackupUnitOk() (*bool, bool) { if o == nil { return nil, false } - return o.CreatePcc, true + return o.CreateBackupUnit, true } -// SetCreatePcc sets field value -func (o *GroupProperties) SetCreatePcc(v bool) { +// SetCreateBackupUnit sets field value +func (o *GroupProperties) SetCreateBackupUnit(v bool) { - o.CreatePcc = &v + o.CreateBackupUnit = &v } -// HasCreatePcc returns a boolean if a field has been set. -func (o *GroupProperties) HasCreatePcc() bool { - if o != nil && o.CreatePcc != nil { +// HasCreateBackupUnit returns a boolean if a field has been set. 
+func (o *GroupProperties) HasCreateBackupUnit() bool { + if o != nil && o.CreateBackupUnit != nil { return true } return false } -// GetS3Privilege returns the S3Privilege field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetS3Privilege() *bool { +// GetCreateDataCenter returns the CreateDataCenter field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetCreateDataCenter() *bool { if o == nil { return nil } - return o.S3Privilege + return o.CreateDataCenter } -// GetS3PrivilegeOk returns a tuple with the S3Privilege field value +// GetCreateDataCenterOk returns a tuple with the CreateDataCenter field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetS3PrivilegeOk() (*bool, bool) { +func (o *GroupProperties) GetCreateDataCenterOk() (*bool, bool) { if o == nil { return nil, false } - return o.S3Privilege, true + return o.CreateDataCenter, true } -// SetS3Privilege sets field value -func (o *GroupProperties) SetS3Privilege(v bool) { +// SetCreateDataCenter sets field value +func (o *GroupProperties) SetCreateDataCenter(v bool) { - o.S3Privilege = &v + o.CreateDataCenter = &v } -// HasS3Privilege returns a boolean if a field has been set. -func (o *GroupProperties) HasS3Privilege() bool { - if o != nil && o.S3Privilege != nil { +// HasCreateDataCenter returns a boolean if a field has been set. +func (o *GroupProperties) HasCreateDataCenter() bool { + if o != nil && o.CreateDataCenter != nil { return true } return false } -// GetCreateBackupUnit returns the CreateBackupUnit field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetCreateBackupUnit() *bool { +// GetCreateFlowLog returns the CreateFlowLog field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetCreateFlowLog() *bool { if o == nil { return nil } - return o.CreateBackupUnit + return o.CreateFlowLog } -// GetCreateBackupUnitOk returns a tuple with the CreateBackupUnit field value +// GetCreateFlowLogOk returns a tuple with the CreateFlowLog field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetCreateBackupUnitOk() (*bool, bool) { +func (o *GroupProperties) GetCreateFlowLogOk() (*bool, bool) { if o == nil { return nil, false } - return o.CreateBackupUnit, true + return o.CreateFlowLog, true } -// SetCreateBackupUnit sets field value -func (o *GroupProperties) SetCreateBackupUnit(v bool) { +// SetCreateFlowLog sets field value +func (o *GroupProperties) SetCreateFlowLog(v bool) { - o.CreateBackupUnit = &v + o.CreateFlowLog = &v } -// HasCreateBackupUnit returns a boolean if a field has been set. -func (o *GroupProperties) HasCreateBackupUnit() bool { - if o != nil && o.CreateBackupUnit != nil { +// HasCreateFlowLog returns a boolean if a field has been set. 
+func (o *GroupProperties) HasCreateFlowLog() bool { + if o != nil && o.CreateFlowLog != nil { return true } @@ -375,7 +337,7 @@ func (o *GroupProperties) HasCreateBackupUnit() bool { } // GetCreateInternetAccess returns the CreateInternetAccess field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *GroupProperties) GetCreateInternetAccess() *bool { if o == nil { return nil @@ -413,7 +375,7 @@ func (o *GroupProperties) HasCreateInternetAccess() bool { } // GetCreateK8sCluster returns the CreateK8sCluster field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *GroupProperties) GetCreateK8sCluster() *bool { if o == nil { return nil @@ -450,114 +412,76 @@ func (o *GroupProperties) HasCreateK8sCluster() bool { return false } -// GetCreateFlowLog returns the CreateFlowLog field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetCreateFlowLog() *bool { +// GetCreatePcc returns the CreatePcc field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetCreatePcc() *bool { if o == nil { return nil } - return o.CreateFlowLog + return o.CreatePcc } -// GetCreateFlowLogOk returns a tuple with the CreateFlowLog field value +// GetCreatePccOk returns a tuple with the CreatePcc field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetCreateFlowLogOk() (*bool, bool) { - if o == nil { - return nil, false - } - - return o.CreateFlowLog, true -} - -// SetCreateFlowLog sets field value -func (o *GroupProperties) SetCreateFlowLog(v bool) { - - o.CreateFlowLog = &v - -} - -// HasCreateFlowLog returns a boolean if a field has been set. -func (o *GroupProperties) HasCreateFlowLog() bool { - if o != nil && o.CreateFlowLog != nil { - return true - } - - return false -} - -// GetAccessAndManageMonitoring returns the AccessAndManageMonitoring field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetAccessAndManageMonitoring() *bool { - if o == nil { - return nil - } - - return o.AccessAndManageMonitoring - -} - -// GetAccessAndManageMonitoringOk returns a tuple with the AccessAndManageMonitoring field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetAccessAndManageMonitoringOk() (*bool, bool) { +func (o *GroupProperties) GetCreatePccOk() (*bool, bool) { if o == nil { return nil, false } - return o.AccessAndManageMonitoring, true + return o.CreatePcc, true } -// SetAccessAndManageMonitoring sets field value -func (o *GroupProperties) SetAccessAndManageMonitoring(v bool) { +// SetCreatePcc sets field value +func (o *GroupProperties) SetCreatePcc(v bool) { - o.AccessAndManageMonitoring = &v + o.CreatePcc = &v } -// HasAccessAndManageMonitoring returns a boolean if a field has been set. -func (o *GroupProperties) HasAccessAndManageMonitoring() bool { - if o != nil && o.AccessAndManageMonitoring != nil { +// HasCreatePcc returns a boolean if a field has been set. 
+func (o *GroupProperties) HasCreatePcc() bool { + if o != nil && o.CreatePcc != nil { return true } return false } -// GetAccessAndManageCertificates returns the AccessAndManageCertificates field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetAccessAndManageCertificates() *bool { +// GetCreateSnapshot returns the CreateSnapshot field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetCreateSnapshot() *bool { if o == nil { return nil } - return o.AccessAndManageCertificates + return o.CreateSnapshot } -// GetAccessAndManageCertificatesOk returns a tuple with the AccessAndManageCertificates field value +// GetCreateSnapshotOk returns a tuple with the CreateSnapshot field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetAccessAndManageCertificatesOk() (*bool, bool) { +func (o *GroupProperties) GetCreateSnapshotOk() (*bool, bool) { if o == nil { return nil, false } - return o.AccessAndManageCertificates, true + return o.CreateSnapshot, true } -// SetAccessAndManageCertificates sets field value -func (o *GroupProperties) SetAccessAndManageCertificates(v bool) { +// SetCreateSnapshot sets field value +func (o *GroupProperties) SetCreateSnapshot(v bool) { - o.AccessAndManageCertificates = &v + o.CreateSnapshot = &v } -// HasAccessAndManageCertificates returns a boolean if a field has been set. -func (o *GroupProperties) HasAccessAndManageCertificates() bool { - if o != nil && o.AccessAndManageCertificates != nil { +// HasCreateSnapshot returns a boolean if a field has been set. +func (o *GroupProperties) HasCreateSnapshot() bool { + if o != nil && o.CreateSnapshot != nil { return true } @@ -565,7 +489,7 @@ func (o *GroupProperties) HasAccessAndManageCertificates() bool { } // GetManageDBaaS returns the ManageDBaaS field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *GroupProperties) GetManageDBaaS() *bool { if o == nil { return nil @@ -602,38 +526,38 @@ func (o *GroupProperties) HasManageDBaaS() bool { return false } -// GetAccessAndManageDns returns the AccessAndManageDns field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetAccessAndManageDns() *bool { +// GetManageDataplatform returns the ManageDataplatform field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetManageDataplatform() *bool { if o == nil { return nil } - return o.AccessAndManageDns + return o.ManageDataplatform } -// GetAccessAndManageDnsOk returns a tuple with the AccessAndManageDns field value +// GetManageDataplatformOk returns a tuple with the ManageDataplatform field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetAccessAndManageDnsOk() (*bool, bool) { +func (o *GroupProperties) GetManageDataplatformOk() (*bool, bool) { if o == nil { return nil, false } - return o.AccessAndManageDns, true + return o.ManageDataplatform, true } -// SetAccessAndManageDns sets field value -func (o *GroupProperties) SetAccessAndManageDns(v bool) { +// SetManageDataplatform sets field value +func (o *GroupProperties) SetManageDataplatform(v bool) { - o.AccessAndManageDns = &v + o.ManageDataplatform = &v } -// HasAccessAndManageDns returns a boolean if a field has been set. -func (o *GroupProperties) HasAccessAndManageDns() bool { - if o != nil && o.AccessAndManageDns != nil { +// HasManageDataplatform returns a boolean if a field has been set. +func (o *GroupProperties) HasManageDataplatform() bool { + if o != nil && o.ManageDataplatform != nil { return true } @@ -641,7 +565,7 @@ func (o *GroupProperties) HasAccessAndManageDns() bool { } // GetManageRegistry returns the ManageRegistry field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *GroupProperties) GetManageRegistry() *bool { if o == nil { return nil @@ -678,97 +602,190 @@ func (o *GroupProperties) HasManageRegistry() bool { return false } -// GetManageDataplatform returns the ManageDataplatform field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *GroupProperties) GetManageDataplatform() *bool { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetName() *string { if o == nil { return nil } - return o.ManageDataplatform + return o.Name } -// GetManageDataplatformOk returns a tuple with the ManageDataplatform field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupProperties) GetManageDataplatformOk() (*bool, bool) { +func (o *GroupProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.ManageDataplatform, true + return o.Name, true } -// SetManageDataplatform sets field value -func (o *GroupProperties) SetManageDataplatform(v bool) { +// SetName sets field value +func (o *GroupProperties) SetName(v string) { - o.ManageDataplatform = &v + o.Name = &v } -// HasManageDataplatform returns a boolean if a field has been set. -func (o *GroupProperties) HasManageDataplatform() bool { - if o != nil && o.ManageDataplatform != nil { +// HasName returns a boolean if a field has been set. +func (o *GroupProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -func (o GroupProperties) MarshalJSON() ([]byte, error) { - toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name +// GetReserveIp returns the ReserveIp field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetReserveIp() *bool { + if o == nil { + return nil } - if o.CreateDataCenter != nil { - toSerialize["createDataCenter"] = o.CreateDataCenter + + return o.ReserveIp + +} + +// GetReserveIpOk returns a tuple with the ReserveIp field value +// and a boolean to check if the value has been set. 
+// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *GroupProperties) GetReserveIpOk() (*bool, bool) { + if o == nil { + return nil, false } - if o.CreateSnapshot != nil { - toSerialize["createSnapshot"] = o.CreateSnapshot + + return o.ReserveIp, true +} + +// SetReserveIp sets field value +func (o *GroupProperties) SetReserveIp(v bool) { + + o.ReserveIp = &v + +} + +// HasReserveIp returns a boolean if a field has been set. +func (o *GroupProperties) HasReserveIp() bool { + if o != nil && o.ReserveIp != nil { + return true } - if o.ReserveIp != nil { - toSerialize["reserveIp"] = o.ReserveIp + + return false +} + +// GetS3Privilege returns the S3Privilege field value +// If the value is explicit nil, nil is returned +func (o *GroupProperties) GetS3Privilege() *bool { + if o == nil { + return nil + } + + return o.S3Privilege + +} + +// GetS3PrivilegeOk returns a tuple with the S3Privilege field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *GroupProperties) GetS3PrivilegeOk() (*bool, bool) { + if o == nil { + return nil, false + } + + return o.S3Privilege, true +} + +// SetS3Privilege sets field value +func (o *GroupProperties) SetS3Privilege(v bool) { + + o.S3Privilege = &v + +} + +// HasS3Privilege returns a boolean if a field has been set. +func (o *GroupProperties) HasS3Privilege() bool { + if o != nil && o.S3Privilege != nil { + return true } + + return false +} + +func (o GroupProperties) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} if o.AccessActivityLog != nil { toSerialize["accessActivityLog"] = o.AccessActivityLog } - if o.CreatePcc != nil { - toSerialize["createPcc"] = o.CreatePcc + + if o.AccessAndManageCertificates != nil { + toSerialize["accessAndManageCertificates"] = o.AccessAndManageCertificates } - if o.S3Privilege != nil { - toSerialize["s3Privilege"] = o.S3Privilege + + if o.AccessAndManageDns != nil { + toSerialize["accessAndManageDns"] = o.AccessAndManageDns } + + if o.AccessAndManageMonitoring != nil { + toSerialize["accessAndManageMonitoring"] = o.AccessAndManageMonitoring + } + if o.CreateBackupUnit != nil { toSerialize["createBackupUnit"] = o.CreateBackupUnit } + + if o.CreateDataCenter != nil { + toSerialize["createDataCenter"] = o.CreateDataCenter + } + + if o.CreateFlowLog != nil { + toSerialize["createFlowLog"] = o.CreateFlowLog + } + if o.CreateInternetAccess != nil { toSerialize["createInternetAccess"] = o.CreateInternetAccess } + if o.CreateK8sCluster != nil { toSerialize["createK8sCluster"] = o.CreateK8sCluster } - if o.CreateFlowLog != nil { - toSerialize["createFlowLog"] = o.CreateFlowLog - } - if o.AccessAndManageMonitoring != nil { - toSerialize["accessAndManageMonitoring"] = o.AccessAndManageMonitoring + + if o.CreatePcc != nil { + toSerialize["createPcc"] = o.CreatePcc } - if o.AccessAndManageCertificates != nil { - toSerialize["accessAndManageCertificates"] = o.AccessAndManageCertificates + + if o.CreateSnapshot != nil { + toSerialize["createSnapshot"] = o.CreateSnapshot } + if o.ManageDBaaS != nil { toSerialize["manageDBaaS"] = o.ManageDBaaS } - if o.AccessAndManageDns != nil { - toSerialize["accessAndManageDns"] = o.AccessAndManageDns + + if o.ManageDataplatform != nil { + toSerialize["manageDataplatform"] = o.ManageDataplatform } + if o.ManageRegistry != nil { toSerialize["manageRegistry"] = o.ManageRegistry } - if o.ManageDataplatform != nil { - toSerialize["manageDataplatform"] = 
o.ManageDataplatform + + if o.Name != nil { + toSerialize["name"] = o.Name + } + + if o.ReserveIp != nil { + toSerialize["reserveIp"] = o.ReserveIp + } + + if o.S3Privilege != nil { + toSerialize["s3Privilege"] = o.S3Privilege } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_share.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_share.go index a25a874a3..ae84478aa 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_share.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_share.go @@ -16,13 +16,13 @@ import ( // GroupShare struct for GroupShare type GroupShare struct { + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *GroupShareProperties `json:"properties"` // resource as generic type Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` - Properties *GroupShareProperties `json:"properties"` } // NewGroupShare instantiates a new GroupShare object @@ -45,152 +45,152 @@ func NewGroupShareWithDefaults() *GroupShare { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *GroupShare) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *GroupShare) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupShare) GetIdOk() (*string, bool) { +func (o *GroupShare) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *GroupShare) SetId(v string) { +// SetHref sets field value +func (o *GroupShare) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *GroupShare) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *GroupShare) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *GroupShare) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *GroupShare) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupShare) GetTypeOk() (*Type, bool) { +func (o *GroupShare) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *GroupShare) SetType(v Type) { +// SetId sets field value +func (o *GroupShare) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. 
-func (o *GroupShare) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *GroupShare) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *GroupShare) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *GroupShare) GetProperties() *GroupShareProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupShare) GetHrefOk() (*string, bool) { +func (o *GroupShare) GetPropertiesOk() (*GroupShareProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *GroupShare) SetHref(v string) { +// SetProperties sets field value +func (o *GroupShare) SetProperties(v GroupShareProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *GroupShare) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. +func (o *GroupShare) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for GroupShareProperties will be returned -func (o *GroupShare) GetProperties() *GroupShareProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *GroupShare) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupShare) GetPropertiesOk() (*GroupShareProperties, bool) { +func (o *GroupShare) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *GroupShare) SetProperties(v GroupShareProperties) { +// SetType sets field value +func (o *GroupShare) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *GroupShare) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *GroupShare) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *GroupShare) HasProperties() bool { func (o GroupShare) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_share_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_share_properties.go index 5e356c127..1ca06267f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_share_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_share_properties.go @@ -41,7 +41,7 @@ func NewGroupSharePropertiesWithDefaults() *GroupShareProperties { } // GetEditPrivilege returns the EditPrivilege field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *GroupShareProperties) GetEditPrivilege() *bool { if o == nil { return nil @@ -79,7 +79,7 @@ func (o *GroupShareProperties) HasEditPrivilege() bool { } // GetSharePrivilege returns the SharePrivilege field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *GroupShareProperties) GetSharePrivilege() *bool { if o == nil { return nil @@ -121,9 +121,11 @@ func (o GroupShareProperties) MarshalJSON() ([]byte, error) { if o.EditPrivilege != nil { toSerialize["editPrivilege"] = o.EditPrivilege } + if o.SharePrivilege != nil { toSerialize["sharePrivilege"] = o.SharePrivilege } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_shares.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_shares.go index 6735473c8..091da39c5 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_shares.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_shares.go @@ -16,14 +16,14 @@ import ( // GroupShares struct for GroupShares type GroupShares struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // Share representing groups and resource relationship - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]GroupShare `json:"items,omitempty"` + // Share representing groups and resource relationship + Type *Type `json:"type,omitempty"` } // NewGroupShares instantiates a new GroupShares object @@ -44,152 +44,152 @@ func NewGroupSharesWithDefaults() *GroupShares { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *GroupShares) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *GroupShares) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupShares) GetIdOk() (*string, bool) { +func (o *GroupShares) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *GroupShares) SetId(v string) { +// SetHref sets field value +func (o *GroupShares) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *GroupShares) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *GroupShares) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *GroupShares) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *GroupShares) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupShares) GetTypeOk() (*Type, bool) { +func (o *GroupShares) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *GroupShares) SetType(v Type) { +// SetId sets field value +func (o *GroupShares) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *GroupShares) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *GroupShares) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *GroupShares) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *GroupShares) GetItems() *[]GroupShare { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupShares) GetHrefOk() (*string, bool) { +func (o *GroupShares) GetItemsOk() (*[]GroupShare, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *GroupShares) SetHref(v string) { +// SetItems sets field value +func (o *GroupShares) SetItems(v []GroupShare) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *GroupShares) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. 
+func (o *GroupShares) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []GroupShare will be returned -func (o *GroupShares) GetItems() *[]GroupShare { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *GroupShares) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupShares) GetItemsOk() (*[]GroupShare, bool) { +func (o *GroupShares) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *GroupShares) SetItems(v []GroupShare) { +// SetType sets field value +func (o *GroupShares) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *GroupShares) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *GroupShares) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *GroupShares) HasItems() bool { func (o GroupShares) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_users.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_users.go index a2fb51bc2..4a4f075c1 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_users.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_users.go @@ -16,14 +16,14 @@ import ( // GroupUsers Collection of the groups the user is a member of. type GroupUsers struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of the resource. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Group `json:"items,omitempty"` + // The type of the resource. + Type *Type `json:"type,omitempty"` } // NewGroupUsers instantiates a new GroupUsers object @@ -44,152 +44,152 @@ func NewGroupUsersWithDefaults() *GroupUsers { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *GroupUsers) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *GroupUsers) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupUsers) GetIdOk() (*string, bool) { +func (o *GroupUsers) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *GroupUsers) SetId(v string) { +// SetHref sets field value +func (o *GroupUsers) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *GroupUsers) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *GroupUsers) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *GroupUsers) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *GroupUsers) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupUsers) GetTypeOk() (*Type, bool) { +func (o *GroupUsers) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *GroupUsers) SetType(v Type) { +// SetId sets field value +func (o *GroupUsers) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *GroupUsers) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *GroupUsers) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *GroupUsers) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *GroupUsers) GetItems() *[]Group { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupUsers) GetHrefOk() (*string, bool) { +func (o *GroupUsers) GetItemsOk() (*[]Group, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *GroupUsers) SetHref(v string) { +// SetItems sets field value +func (o *GroupUsers) SetItems(v []Group) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *GroupUsers) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. 
+func (o *GroupUsers) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Group will be returned -func (o *GroupUsers) GetItems() *[]Group { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *GroupUsers) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *GroupUsers) GetItemsOk() (*[]Group, bool) { +func (o *GroupUsers) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *GroupUsers) SetItems(v []Group) { +// SetType sets field value +func (o *GroupUsers) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *GroupUsers) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *GroupUsers) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *GroupUsers) HasItems() bool { func (o GroupUsers) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_groups.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_groups.go index 85df0a37d..578d27c1c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_groups.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_groups.go @@ -16,14 +16,14 @@ import ( // Groups struct for Groups type Groups struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of the resource. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Group `json:"items,omitempty"` + // The type of the resource. + Type *Type `json:"type,omitempty"` } // NewGroups instantiates a new Groups object @@ -44,152 +44,152 @@ func NewGroupsWithDefaults() *Groups { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Groups) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Groups) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Groups) GetIdOk() (*string, bool) { +func (o *Groups) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Groups) SetId(v string) { +// SetHref sets field value +func (o *Groups) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Groups) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Groups) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Groups) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Groups) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Groups) GetTypeOk() (*Type, bool) { +func (o *Groups) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Groups) SetType(v Type) { +// SetId sets field value +func (o *Groups) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Groups) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Groups) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Groups) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *Groups) GetItems() *[]Group { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Groups) GetHrefOk() (*string, bool) { +func (o *Groups) GetItemsOk() (*[]Group, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *Groups) SetHref(v string) { +// SetItems sets field value +func (o *Groups) SetItems(v []Group) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *Groups) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. 
+func (o *Groups) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Group will be returned -func (o *Groups) GetItems() *[]Group { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Groups) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Groups) GetItemsOk() (*[]Group, bool) { +func (o *Groups) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *Groups) SetItems(v []Group) { +// SetType sets field value +func (o *Groups) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *Groups) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *Groups) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *Groups) HasItems() bool { func (o Groups) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_image.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_image.go index 747a0e949..962c8e2e8 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_image.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_image.go @@ -16,14 +16,14 @@ import ( // Image struct for Image type Image struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *ImageProperties `json:"properties"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewImage instantiates a new Image object @@ -46,190 +46,190 @@ func NewImageWithDefaults() *Image { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Image) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Image) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Image) GetIdOk() (*string, bool) { +func (o *Image) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Image) SetId(v string) { +// SetHref sets field value +func (o *Image) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Image) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Image) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Image) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Image) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Image) GetTypeOk() (*Type, bool) { +func (o *Image) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Image) SetType(v Type) { +// SetId sets field value +func (o *Image) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Image) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Image) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Image) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *Image) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Image) GetHrefOk() (*string, bool) { +func (o *Image) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *Image) SetHref(v string) { +// SetMetadata sets field value +func (o *Image) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *Image) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. 
+func (o *Image) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *Image) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *Image) GetProperties() *ImageProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Image) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *Image) GetPropertiesOk() (*ImageProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *Image) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *Image) SetProperties(v ImageProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *Image) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *Image) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for ImageProperties will be returned -func (o *Image) GetProperties() *ImageProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Image) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Image) GetPropertiesOk() (*ImageProperties, bool) { +func (o *Image) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *Image) SetProperties(v ImageProperties) { +// SetType sets field value +func (o *Image) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *Image) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Image) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *Image) HasProperties() bool { func (o Image) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_image_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_image_properties.go index 2ab0f5a65..f6059779e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_image_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_image_properties.go @@ -16,44 +16,44 @@ import ( // ImageProperties struct for ImageProperties type ImageProperties struct { - // The resource name. - Name *string `json:"name,omitempty"` - // Human-readable description. - Description *string `json:"description,omitempty"` - // The location of this image/snapshot. - Location *string `json:"location,omitempty"` - // The image size in GB. - Size *float32 `json:"size,omitempty"` + // Cloud init compatibility. + CloudInit *string `json:"cloudInit,omitempty"` // Hot-plug capable CPU (no reboot required). CpuHotPlug *bool `json:"cpuHotPlug,omitempty"` // Hot-unplug capable CPU (no reboot required). CpuHotUnplug *bool `json:"cpuHotUnplug,omitempty"` - // Hot-plug capable RAM (no reboot required). - RamHotPlug *bool `json:"ramHotPlug,omitempty"` - // Hot-unplug capable RAM (no reboot required). - RamHotUnplug *bool `json:"ramHotUnplug,omitempty"` - // Hot-plug capable NIC (no reboot required). - NicHotPlug *bool `json:"nicHotPlug,omitempty"` - // Hot-unplug capable NIC (no reboot required). - NicHotUnplug *bool `json:"nicHotUnplug,omitempty"` - // Hot-plug capable Virt-IO drive (no reboot required). - DiscVirtioHotPlug *bool `json:"discVirtioHotPlug,omitempty"` - // Hot-unplug capable Virt-IO drive (no reboot required). Not supported with Windows VMs. - DiscVirtioHotUnplug *bool `json:"discVirtioHotUnplug,omitempty"` + // Human-readable description. + Description *string `json:"description,omitempty"` // Hot-plug capable SCSI drive (no reboot required). DiscScsiHotPlug *bool `json:"discScsiHotPlug,omitempty"` // Hot-unplug capable SCSI drive (no reboot required). Not supported with Windows VMs. DiscScsiHotUnplug *bool `json:"discScsiHotUnplug,omitempty"` - // The OS type of this image. - LicenceType *string `json:"licenceType"` + // Hot-plug capable Virt-IO drive (no reboot required). + DiscVirtioHotPlug *bool `json:"discVirtioHotPlug,omitempty"` + // Hot-unplug capable Virt-IO drive (no reboot required). Not supported with Windows VMs. + DiscVirtioHotUnplug *bool `json:"discVirtioHotUnplug,omitempty"` + // List of image aliases mapped for this image + ImageAliases *[]string `json:"imageAliases,omitempty"` // The image type. ImageType *string `json:"imageType,omitempty"` + // The OS type of this image. + LicenceType *string `json:"licenceType"` + // The location of this image/snapshot. + Location *string `json:"location,omitempty"` + // The resource name. + Name *string `json:"name,omitempty"` + // Hot-plug capable NIC (no reboot required). 
+ NicHotPlug *bool `json:"nicHotPlug,omitempty"` + // Hot-unplug capable NIC (no reboot required). + NicHotUnplug *bool `json:"nicHotUnplug,omitempty"` // Indicates whether the image is part of a public repository. Public *bool `json:"public,omitempty"` - // List of image aliases mapped for this image - ImageAliases *[]string `json:"imageAliases,omitempty"` - // Cloud init compatibility. - CloudInit *string `json:"cloudInit,omitempty"` + // Hot-plug capable RAM (no reboot required). + RamHotPlug *bool `json:"ramHotPlug,omitempty"` + // Hot-unplug capable RAM (no reboot required). + RamHotUnplug *bool `json:"ramHotUnplug,omitempty"` + // The image size in GB. + Size *float32 `json:"size,omitempty"` } // NewImageProperties instantiates a new ImageProperties object @@ -76,722 +76,722 @@ func NewImagePropertiesWithDefaults() *ImageProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ImageProperties) GetName() *string { +// GetCloudInit returns the CloudInit field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetCloudInit() *string { if o == nil { return nil } - return o.Name + return o.CloudInit } -// GetNameOk returns a tuple with the Name field value +// GetCloudInitOk returns a tuple with the CloudInit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetNameOk() (*string, bool) { +func (o *ImageProperties) GetCloudInitOk() (*string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.CloudInit, true } -// SetName sets field value -func (o *ImageProperties) SetName(v string) { +// SetCloudInit sets field value +func (o *ImageProperties) SetCloudInit(v string) { - o.Name = &v + o.CloudInit = &v } -// HasName returns a boolean if a field has been set. -func (o *ImageProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasCloudInit returns a boolean if a field has been set. +func (o *ImageProperties) HasCloudInit() bool { + if o != nil && o.CloudInit != nil { return true } return false } -// GetDescription returns the Description field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ImageProperties) GetDescription() *string { +// GetCpuHotPlug returns the CpuHotPlug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetCpuHotPlug() *bool { if o == nil { return nil } - return o.Description + return o.CpuHotPlug } -// GetDescriptionOk returns a tuple with the Description field value +// GetCpuHotPlugOk returns a tuple with the CpuHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetDescriptionOk() (*string, bool) { +func (o *ImageProperties) GetCpuHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.Description, true + return o.CpuHotPlug, true } -// SetDescription sets field value -func (o *ImageProperties) SetDescription(v string) { +// SetCpuHotPlug sets field value +func (o *ImageProperties) SetCpuHotPlug(v bool) { - o.Description = &v + o.CpuHotPlug = &v } -// HasDescription returns a boolean if a field has been set. -func (o *ImageProperties) HasDescription() bool { - if o != nil && o.Description != nil { +// HasCpuHotPlug returns a boolean if a field has been set. 
+func (o *ImageProperties) HasCpuHotPlug() bool { + if o != nil && o.CpuHotPlug != nil { return true } return false } -// GetLocation returns the Location field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ImageProperties) GetLocation() *string { +// GetCpuHotUnplug returns the CpuHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetCpuHotUnplug() *bool { if o == nil { return nil } - return o.Location + return o.CpuHotUnplug } -// GetLocationOk returns a tuple with the Location field value +// GetCpuHotUnplugOk returns a tuple with the CpuHotUnplug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetLocationOk() (*string, bool) { +func (o *ImageProperties) GetCpuHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.Location, true + return o.CpuHotUnplug, true } -// SetLocation sets field value -func (o *ImageProperties) SetLocation(v string) { +// SetCpuHotUnplug sets field value +func (o *ImageProperties) SetCpuHotUnplug(v bool) { - o.Location = &v + o.CpuHotUnplug = &v } -// HasLocation returns a boolean if a field has been set. -func (o *ImageProperties) HasLocation() bool { - if o != nil && o.Location != nil { +// HasCpuHotUnplug returns a boolean if a field has been set. +func (o *ImageProperties) HasCpuHotUnplug() bool { + if o != nil && o.CpuHotUnplug != nil { return true } return false } -// GetSize returns the Size field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *ImageProperties) GetSize() *float32 { +// GetDescription returns the Description field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetDescription() *string { if o == nil { return nil } - return o.Size + return o.Description } -// GetSizeOk returns a tuple with the Size field value +// GetDescriptionOk returns a tuple with the Description field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetSizeOk() (*float32, bool) { +func (o *ImageProperties) GetDescriptionOk() (*string, bool) { if o == nil { return nil, false } - return o.Size, true + return o.Description, true } -// SetSize sets field value -func (o *ImageProperties) SetSize(v float32) { +// SetDescription sets field value +func (o *ImageProperties) SetDescription(v string) { - o.Size = &v + o.Description = &v } -// HasSize returns a boolean if a field has been set. -func (o *ImageProperties) HasSize() bool { - if o != nil && o.Size != nil { +// HasDescription returns a boolean if a field has been set. +func (o *ImageProperties) HasDescription() bool { + if o != nil && o.Description != nil { return true } return false } -// GetCpuHotPlug returns the CpuHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetCpuHotPlug() *bool { +// GetDiscScsiHotPlug returns the DiscScsiHotPlug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetDiscScsiHotPlug() *bool { if o == nil { return nil } - return o.CpuHotPlug + return o.DiscScsiHotPlug } -// GetCpuHotPlugOk returns a tuple with the CpuHotPlug field value +// GetDiscScsiHotPlugOk returns a tuple with the DiscScsiHotPlug field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetCpuHotPlugOk() (*bool, bool) { +func (o *ImageProperties) GetDiscScsiHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.CpuHotPlug, true + return o.DiscScsiHotPlug, true } -// SetCpuHotPlug sets field value -func (o *ImageProperties) SetCpuHotPlug(v bool) { +// SetDiscScsiHotPlug sets field value +func (o *ImageProperties) SetDiscScsiHotPlug(v bool) { - o.CpuHotPlug = &v + o.DiscScsiHotPlug = &v } -// HasCpuHotPlug returns a boolean if a field has been set. -func (o *ImageProperties) HasCpuHotPlug() bool { - if o != nil && o.CpuHotPlug != nil { +// HasDiscScsiHotPlug returns a boolean if a field has been set. +func (o *ImageProperties) HasDiscScsiHotPlug() bool { + if o != nil && o.DiscScsiHotPlug != nil { return true } return false } -// GetCpuHotUnplug returns the CpuHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetCpuHotUnplug() *bool { +// GetDiscScsiHotUnplug returns the DiscScsiHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetDiscScsiHotUnplug() *bool { if o == nil { return nil } - return o.CpuHotUnplug + return o.DiscScsiHotUnplug } -// GetCpuHotUnplugOk returns a tuple with the CpuHotUnplug field value +// GetDiscScsiHotUnplugOk returns a tuple with the DiscScsiHotUnplug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetCpuHotUnplugOk() (*bool, bool) { +func (o *ImageProperties) GetDiscScsiHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.CpuHotUnplug, true + return o.DiscScsiHotUnplug, true } -// SetCpuHotUnplug sets field value -func (o *ImageProperties) SetCpuHotUnplug(v bool) { +// SetDiscScsiHotUnplug sets field value +func (o *ImageProperties) SetDiscScsiHotUnplug(v bool) { - o.CpuHotUnplug = &v + o.DiscScsiHotUnplug = &v } -// HasCpuHotUnplug returns a boolean if a field has been set. -func (o *ImageProperties) HasCpuHotUnplug() bool { - if o != nil && o.CpuHotUnplug != nil { +// HasDiscScsiHotUnplug returns a boolean if a field has been set. +func (o *ImageProperties) HasDiscScsiHotUnplug() bool { + if o != nil && o.DiscScsiHotUnplug != nil { return true } return false } -// GetRamHotPlug returns the RamHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetRamHotPlug() *bool { +// GetDiscVirtioHotPlug returns the DiscVirtioHotPlug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetDiscVirtioHotPlug() *bool { if o == nil { return nil } - return o.RamHotPlug + return o.DiscVirtioHotPlug } -// GetRamHotPlugOk returns a tuple with the RamHotPlug field value +// GetDiscVirtioHotPlugOk returns a tuple with the DiscVirtioHotPlug field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetRamHotPlugOk() (*bool, bool) { +func (o *ImageProperties) GetDiscVirtioHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.RamHotPlug, true + return o.DiscVirtioHotPlug, true } -// SetRamHotPlug sets field value -func (o *ImageProperties) SetRamHotPlug(v bool) { +// SetDiscVirtioHotPlug sets field value +func (o *ImageProperties) SetDiscVirtioHotPlug(v bool) { - o.RamHotPlug = &v + o.DiscVirtioHotPlug = &v } -// HasRamHotPlug returns a boolean if a field has been set. -func (o *ImageProperties) HasRamHotPlug() bool { - if o != nil && o.RamHotPlug != nil { +// HasDiscVirtioHotPlug returns a boolean if a field has been set. +func (o *ImageProperties) HasDiscVirtioHotPlug() bool { + if o != nil && o.DiscVirtioHotPlug != nil { return true } return false } -// GetRamHotUnplug returns the RamHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetRamHotUnplug() *bool { +// GetDiscVirtioHotUnplug returns the DiscVirtioHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetDiscVirtioHotUnplug() *bool { if o == nil { return nil } - return o.RamHotUnplug + return o.DiscVirtioHotUnplug } -// GetRamHotUnplugOk returns a tuple with the RamHotUnplug field value +// GetDiscVirtioHotUnplugOk returns a tuple with the DiscVirtioHotUnplug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetRamHotUnplugOk() (*bool, bool) { +func (o *ImageProperties) GetDiscVirtioHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.RamHotUnplug, true + return o.DiscVirtioHotUnplug, true } -// SetRamHotUnplug sets field value -func (o *ImageProperties) SetRamHotUnplug(v bool) { +// SetDiscVirtioHotUnplug sets field value +func (o *ImageProperties) SetDiscVirtioHotUnplug(v bool) { - o.RamHotUnplug = &v + o.DiscVirtioHotUnplug = &v } -// HasRamHotUnplug returns a boolean if a field has been set. -func (o *ImageProperties) HasRamHotUnplug() bool { - if o != nil && o.RamHotUnplug != nil { +// HasDiscVirtioHotUnplug returns a boolean if a field has been set. +func (o *ImageProperties) HasDiscVirtioHotUnplug() bool { + if o != nil && o.DiscVirtioHotUnplug != nil { return true } return false } -// GetNicHotPlug returns the NicHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetNicHotPlug() *bool { +// GetImageAliases returns the ImageAliases field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetImageAliases() *[]string { if o == nil { return nil } - return o.NicHotPlug + return o.ImageAliases } -// GetNicHotPlugOk returns a tuple with the NicHotPlug field value +// GetImageAliasesOk returns a tuple with the ImageAliases field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetNicHotPlugOk() (*bool, bool) { +func (o *ImageProperties) GetImageAliasesOk() (*[]string, bool) { if o == nil { return nil, false } - return o.NicHotPlug, true + return o.ImageAliases, true } -// SetNicHotPlug sets field value -func (o *ImageProperties) SetNicHotPlug(v bool) { +// SetImageAliases sets field value +func (o *ImageProperties) SetImageAliases(v []string) { - o.NicHotPlug = &v + o.ImageAliases = &v } -// HasNicHotPlug returns a boolean if a field has been set. -func (o *ImageProperties) HasNicHotPlug() bool { - if o != nil && o.NicHotPlug != nil { +// HasImageAliases returns a boolean if a field has been set. +func (o *ImageProperties) HasImageAliases() bool { + if o != nil && o.ImageAliases != nil { return true } return false } -// GetNicHotUnplug returns the NicHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetNicHotUnplug() *bool { +// GetImageType returns the ImageType field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetImageType() *string { if o == nil { return nil } - return o.NicHotUnplug + return o.ImageType } -// GetNicHotUnplugOk returns a tuple with the NicHotUnplug field value +// GetImageTypeOk returns a tuple with the ImageType field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetNicHotUnplugOk() (*bool, bool) { +func (o *ImageProperties) GetImageTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.NicHotUnplug, true + return o.ImageType, true } -// SetNicHotUnplug sets field value -func (o *ImageProperties) SetNicHotUnplug(v bool) { +// SetImageType sets field value +func (o *ImageProperties) SetImageType(v string) { - o.NicHotUnplug = &v + o.ImageType = &v } -// HasNicHotUnplug returns a boolean if a field has been set. -func (o *ImageProperties) HasNicHotUnplug() bool { - if o != nil && o.NicHotUnplug != nil { +// HasImageType returns a boolean if a field has been set. +func (o *ImageProperties) HasImageType() bool { + if o != nil && o.ImageType != nil { return true } return false } -// GetDiscVirtioHotPlug returns the DiscVirtioHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetDiscVirtioHotPlug() *bool { +// GetLicenceType returns the LicenceType field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetLicenceType() *string { if o == nil { return nil } - return o.DiscVirtioHotPlug + return o.LicenceType } -// GetDiscVirtioHotPlugOk returns a tuple with the DiscVirtioHotPlug field value +// GetLicenceTypeOk returns a tuple with the LicenceType field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetDiscVirtioHotPlugOk() (*bool, bool) { +func (o *ImageProperties) GetLicenceTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.DiscVirtioHotPlug, true + return o.LicenceType, true } -// SetDiscVirtioHotPlug sets field value -func (o *ImageProperties) SetDiscVirtioHotPlug(v bool) { +// SetLicenceType sets field value +func (o *ImageProperties) SetLicenceType(v string) { - o.DiscVirtioHotPlug = &v + o.LicenceType = &v } -// HasDiscVirtioHotPlug returns a boolean if a field has been set. 
-func (o *ImageProperties) HasDiscVirtioHotPlug() bool { - if o != nil && o.DiscVirtioHotPlug != nil { +// HasLicenceType returns a boolean if a field has been set. +func (o *ImageProperties) HasLicenceType() bool { + if o != nil && o.LicenceType != nil { return true } return false } -// GetDiscVirtioHotUnplug returns the DiscVirtioHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetDiscVirtioHotUnplug() *bool { +// GetLocation returns the Location field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetLocation() *string { if o == nil { return nil } - return o.DiscVirtioHotUnplug + return o.Location } -// GetDiscVirtioHotUnplugOk returns a tuple with the DiscVirtioHotUnplug field value +// GetLocationOk returns a tuple with the Location field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetDiscVirtioHotUnplugOk() (*bool, bool) { +func (o *ImageProperties) GetLocationOk() (*string, bool) { if o == nil { return nil, false } - return o.DiscVirtioHotUnplug, true + return o.Location, true } -// SetDiscVirtioHotUnplug sets field value -func (o *ImageProperties) SetDiscVirtioHotUnplug(v bool) { +// SetLocation sets field value +func (o *ImageProperties) SetLocation(v string) { - o.DiscVirtioHotUnplug = &v + o.Location = &v } -// HasDiscVirtioHotUnplug returns a boolean if a field has been set. -func (o *ImageProperties) HasDiscVirtioHotUnplug() bool { - if o != nil && o.DiscVirtioHotUnplug != nil { +// HasLocation returns a boolean if a field has been set. +func (o *ImageProperties) HasLocation() bool { + if o != nil && o.Location != nil { return true } return false } -// GetDiscScsiHotPlug returns the DiscScsiHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetDiscScsiHotPlug() *bool { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetName() *string { if o == nil { return nil } - return o.DiscScsiHotPlug + return o.Name } -// GetDiscScsiHotPlugOk returns a tuple with the DiscScsiHotPlug field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetDiscScsiHotPlugOk() (*bool, bool) { +func (o *ImageProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.DiscScsiHotPlug, true + return o.Name, true } -// SetDiscScsiHotPlug sets field value -func (o *ImageProperties) SetDiscScsiHotPlug(v bool) { +// SetName sets field value +func (o *ImageProperties) SetName(v string) { - o.DiscScsiHotPlug = &v + o.Name = &v } -// HasDiscScsiHotPlug returns a boolean if a field has been set. -func (o *ImageProperties) HasDiscScsiHotPlug() bool { - if o != nil && o.DiscScsiHotPlug != nil { +// HasName returns a boolean if a field has been set. 
+func (o *ImageProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetDiscScsiHotUnplug returns the DiscScsiHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetDiscScsiHotUnplug() *bool { +// GetNicHotPlug returns the NicHotPlug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetNicHotPlug() *bool { if o == nil { return nil } - return o.DiscScsiHotUnplug + return o.NicHotPlug } -// GetDiscScsiHotUnplugOk returns a tuple with the DiscScsiHotUnplug field value +// GetNicHotPlugOk returns a tuple with the NicHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetDiscScsiHotUnplugOk() (*bool, bool) { +func (o *ImageProperties) GetNicHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.DiscScsiHotUnplug, true + return o.NicHotPlug, true } -// SetDiscScsiHotUnplug sets field value -func (o *ImageProperties) SetDiscScsiHotUnplug(v bool) { +// SetNicHotPlug sets field value +func (o *ImageProperties) SetNicHotPlug(v bool) { - o.DiscScsiHotUnplug = &v + o.NicHotPlug = &v } -// HasDiscScsiHotUnplug returns a boolean if a field has been set. -func (o *ImageProperties) HasDiscScsiHotUnplug() bool { - if o != nil && o.DiscScsiHotUnplug != nil { +// HasNicHotPlug returns a boolean if a field has been set. +func (o *ImageProperties) HasNicHotPlug() bool { + if o != nil && o.NicHotPlug != nil { return true } return false } -// GetLicenceType returns the LicenceType field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ImageProperties) GetLicenceType() *string { +// GetNicHotUnplug returns the NicHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetNicHotUnplug() *bool { if o == nil { return nil } - return o.LicenceType + return o.NicHotUnplug } -// GetLicenceTypeOk returns a tuple with the LicenceType field value +// GetNicHotUnplugOk returns a tuple with the NicHotUnplug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetLicenceTypeOk() (*string, bool) { +func (o *ImageProperties) GetNicHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.LicenceType, true + return o.NicHotUnplug, true } -// SetLicenceType sets field value -func (o *ImageProperties) SetLicenceType(v string) { +// SetNicHotUnplug sets field value +func (o *ImageProperties) SetNicHotUnplug(v bool) { - o.LicenceType = &v + o.NicHotUnplug = &v } -// HasLicenceType returns a boolean if a field has been set. -func (o *ImageProperties) HasLicenceType() bool { - if o != nil && o.LicenceType != nil { +// HasNicHotUnplug returns a boolean if a field has been set. 
+func (o *ImageProperties) HasNicHotUnplug() bool { + if o != nil && o.NicHotUnplug != nil { return true } return false } -// GetImageType returns the ImageType field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ImageProperties) GetImageType() *string { +// GetPublic returns the Public field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetPublic() *bool { if o == nil { return nil } - return o.ImageType + return o.Public } -// GetImageTypeOk returns a tuple with the ImageType field value +// GetPublicOk returns a tuple with the Public field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetImageTypeOk() (*string, bool) { +func (o *ImageProperties) GetPublicOk() (*bool, bool) { if o == nil { return nil, false } - return o.ImageType, true + return o.Public, true } -// SetImageType sets field value -func (o *ImageProperties) SetImageType(v string) { +// SetPublic sets field value +func (o *ImageProperties) SetPublic(v bool) { - o.ImageType = &v + o.Public = &v } -// HasImageType returns a boolean if a field has been set. -func (o *ImageProperties) HasImageType() bool { - if o != nil && o.ImageType != nil { +// HasPublic returns a boolean if a field has been set. +func (o *ImageProperties) HasPublic() bool { + if o != nil && o.Public != nil { return true } return false } -// GetPublic returns the Public field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *ImageProperties) GetPublic() *bool { +// GetRamHotPlug returns the RamHotPlug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetRamHotPlug() *bool { if o == nil { return nil } - return o.Public + return o.RamHotPlug } -// GetPublicOk returns a tuple with the Public field value +// GetRamHotPlugOk returns a tuple with the RamHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetPublicOk() (*bool, bool) { +func (o *ImageProperties) GetRamHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.Public, true + return o.RamHotPlug, true } -// SetPublic sets field value -func (o *ImageProperties) SetPublic(v bool) { +// SetRamHotPlug sets field value +func (o *ImageProperties) SetRamHotPlug(v bool) { - o.Public = &v + o.RamHotPlug = &v } -// HasPublic returns a boolean if a field has been set. -func (o *ImageProperties) HasPublic() bool { - if o != nil && o.Public != nil { +// HasRamHotPlug returns a boolean if a field has been set. +func (o *ImageProperties) HasRamHotPlug() bool { + if o != nil && o.RamHotPlug != nil { return true } return false } -// GetImageAliases returns the ImageAliases field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *ImageProperties) GetImageAliases() *[]string { +// GetRamHotUnplug returns the RamHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetRamHotUnplug() *bool { if o == nil { return nil } - return o.ImageAliases + return o.RamHotUnplug } -// GetImageAliasesOk returns a tuple with the ImageAliases field value +// GetRamHotUnplugOk returns a tuple with the RamHotUnplug field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetImageAliasesOk() (*[]string, bool) { +func (o *ImageProperties) GetRamHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.ImageAliases, true + return o.RamHotUnplug, true } -// SetImageAliases sets field value -func (o *ImageProperties) SetImageAliases(v []string) { +// SetRamHotUnplug sets field value +func (o *ImageProperties) SetRamHotUnplug(v bool) { - o.ImageAliases = &v + o.RamHotUnplug = &v } -// HasImageAliases returns a boolean if a field has been set. -func (o *ImageProperties) HasImageAliases() bool { - if o != nil && o.ImageAliases != nil { +// HasRamHotUnplug returns a boolean if a field has been set. +func (o *ImageProperties) HasRamHotUnplug() bool { + if o != nil && o.RamHotUnplug != nil { return true } return false } -// GetCloudInit returns the CloudInit field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ImageProperties) GetCloudInit() *string { +// GetSize returns the Size field value +// If the value is explicit nil, nil is returned +func (o *ImageProperties) GetSize() *float32 { if o == nil { return nil } - return o.CloudInit + return o.Size } -// GetCloudInitOk returns a tuple with the CloudInit field value +// GetSizeOk returns a tuple with the Size field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ImageProperties) GetCloudInitOk() (*string, bool) { +func (o *ImageProperties) GetSizeOk() (*float32, bool) { if o == nil { return nil, false } - return o.CloudInit, true + return o.Size, true } -// SetCloudInit sets field value -func (o *ImageProperties) SetCloudInit(v string) { +// SetSize sets field value +func (o *ImageProperties) SetSize(v float32) { - o.CloudInit = &v + o.Size = &v } -// HasCloudInit returns a boolean if a field has been set. -func (o *ImageProperties) HasCloudInit() bool { - if o != nil && o.CloudInit != nil { +// HasSize returns a boolean if a field has been set. 
+func (o *ImageProperties) HasSize() bool { + if o != nil && o.Size != nil { return true } @@ -800,63 +800,82 @@ func (o *ImageProperties) HasCloudInit() bool { func (o ImageProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } - if o.Description != nil { - toSerialize["description"] = o.Description - } - if o.Location != nil { - toSerialize["location"] = o.Location - } - if o.Size != nil { - toSerialize["size"] = o.Size + if o.CloudInit != nil { + toSerialize["cloudInit"] = o.CloudInit } + if o.CpuHotPlug != nil { toSerialize["cpuHotPlug"] = o.CpuHotPlug } + if o.CpuHotUnplug != nil { toSerialize["cpuHotUnplug"] = o.CpuHotUnplug } - if o.RamHotPlug != nil { - toSerialize["ramHotPlug"] = o.RamHotPlug - } - if o.RamHotUnplug != nil { - toSerialize["ramHotUnplug"] = o.RamHotUnplug + + if o.Description != nil { + toSerialize["description"] = o.Description } - if o.NicHotPlug != nil { - toSerialize["nicHotPlug"] = o.NicHotPlug + + if o.DiscScsiHotPlug != nil { + toSerialize["discScsiHotPlug"] = o.DiscScsiHotPlug } - if o.NicHotUnplug != nil { - toSerialize["nicHotUnplug"] = o.NicHotUnplug + + if o.DiscScsiHotUnplug != nil { + toSerialize["discScsiHotUnplug"] = o.DiscScsiHotUnplug } + if o.DiscVirtioHotPlug != nil { toSerialize["discVirtioHotPlug"] = o.DiscVirtioHotPlug } + if o.DiscVirtioHotUnplug != nil { toSerialize["discVirtioHotUnplug"] = o.DiscVirtioHotUnplug } - if o.DiscScsiHotPlug != nil { - toSerialize["discScsiHotPlug"] = o.DiscScsiHotPlug + + if o.ImageAliases != nil { + toSerialize["imageAliases"] = o.ImageAliases } - if o.DiscScsiHotUnplug != nil { - toSerialize["discScsiHotUnplug"] = o.DiscScsiHotUnplug + + if o.ImageType != nil { + toSerialize["imageType"] = o.ImageType } + if o.LicenceType != nil { toSerialize["licenceType"] = o.LicenceType } - if o.ImageType != nil { - toSerialize["imageType"] = o.ImageType + + if o.Location != nil { + toSerialize["location"] = o.Location + } + + if o.Name != nil { + toSerialize["name"] = o.Name + } + + if o.NicHotPlug != nil { + toSerialize["nicHotPlug"] = o.NicHotPlug } + + if o.NicHotUnplug != nil { + toSerialize["nicHotUnplug"] = o.NicHotUnplug + } + if o.Public != nil { toSerialize["public"] = o.Public } - if o.ImageAliases != nil { - toSerialize["imageAliases"] = o.ImageAliases + + if o.RamHotPlug != nil { + toSerialize["ramHotPlug"] = o.RamHotPlug } - if o.CloudInit != nil { - toSerialize["cloudInit"] = o.CloudInit + + if o.RamHotUnplug != nil { + toSerialize["ramHotUnplug"] = o.RamHotUnplug } + + if o.Size != nil { + toSerialize["size"] = o.Size + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_images.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_images.go index ff496fb19..1cec4020e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_images.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_images.go @@ -16,14 +16,14 @@ import ( // Images struct for Images type Images struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Image `json:"items,omitempty"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewImages instantiates a new Images object @@ -44,152 +44,152 @@ func NewImagesWithDefaults() *Images { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Images) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Images) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Images) GetIdOk() (*string, bool) { +func (o *Images) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Images) SetId(v string) { +// SetHref sets field value +func (o *Images) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Images) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Images) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Images) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Images) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Images) GetTypeOk() (*Type, bool) { +func (o *Images) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Images) SetType(v Type) { +// SetId sets field value +func (o *Images) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Images) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Images) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Images) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *Images) GetItems() *[]Image { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Images) GetHrefOk() (*string, bool) { +func (o *Images) GetItemsOk() (*[]Image, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *Images) SetHref(v string) { +// SetItems sets field value +func (o *Images) SetItems(v []Image) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *Images) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *Images) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Image will be returned -func (o *Images) GetItems() *[]Image { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Images) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Images) GetItemsOk() (*[]Image, bool) { +func (o *Images) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *Images) SetItems(v []Image) { +// SetType sets field value +func (o *Images) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *Images) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *Images) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *Images) HasItems() bool { func (o Images) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_info.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_info.go index 19f4a1467..2ada276b5 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_info.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_info.go @@ -43,7 +43,7 @@ func NewInfoWithDefaults() *Info { } // GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *Info) GetHref() *string { if o == nil { return nil @@ -81,7 +81,7 @@ func (o *Info) HasHref() bool { } // GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *Info) GetName() *string { if o == nil { return nil @@ -119,7 +119,7 @@ func (o *Info) HasName() bool { } // GetVersion returns the Version field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *Info) GetVersion() *string { if o == nil { return nil @@ -161,12 +161,15 @@ func (o Info) MarshalJSON() ([]byte, error) { if o.Href != nil { toSerialize["href"] = o.Href } + if o.Name != nil { toSerialize["name"] = o.Name } + if o.Version != nil { toSerialize["version"] = o.Version } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_block.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_block.go index af07f6fb1..c7e16a0f2 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_block.go +++ 
b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_block.go @@ -16,14 +16,14 @@ import ( // IpBlock struct for IpBlock type IpBlock struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *IpBlockProperties `json:"properties"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewIpBlock instantiates a new IpBlock object @@ -46,190 +46,190 @@ func NewIpBlockWithDefaults() *IpBlock { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpBlock) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *IpBlock) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlock) GetIdOk() (*string, bool) { +func (o *IpBlock) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *IpBlock) SetId(v string) { +// SetHref sets field value +func (o *IpBlock) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *IpBlock) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *IpBlock) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *IpBlock) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *IpBlock) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlock) GetTypeOk() (*Type, bool) { +func (o *IpBlock) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *IpBlock) SetType(v Type) { +// SetId sets field value +func (o *IpBlock) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *IpBlock) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. 
+func (o *IpBlock) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpBlock) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *IpBlock) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlock) GetHrefOk() (*string, bool) { +func (o *IpBlock) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *IpBlock) SetHref(v string) { +// SetMetadata sets field value +func (o *IpBlock) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *IpBlock) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *IpBlock) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *IpBlock) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *IpBlock) GetProperties() *IpBlockProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlock) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *IpBlock) GetPropertiesOk() (*IpBlockProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *IpBlock) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *IpBlock) SetProperties(v IpBlockProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *IpBlock) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *IpBlock) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for IpBlockProperties will be returned -func (o *IpBlock) GetProperties() *IpBlockProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *IpBlock) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlock) GetPropertiesOk() (*IpBlockProperties, bool) { +func (o *IpBlock) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *IpBlock) SetProperties(v IpBlockProperties) { +// SetType sets field value +func (o *IpBlock) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *IpBlock) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *IpBlock) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *IpBlock) HasProperties() bool { func (o IpBlock) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_block_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_block_properties.go index 4f732b53b..19c3ed757 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_block_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_block_properties.go @@ -16,16 +16,16 @@ import ( // IpBlockProperties struct for IpBlockProperties type IpBlockProperties struct { + // Read-Only attribute. Lists consumption detail for an individual IP + IpConsumers *[]IpConsumer `json:"ipConsumers,omitempty"` // Collection of IPs, associated with the IP Block. Ips *[]string `json:"ips,omitempty"` // Location of that IP block. Property cannot be modified after it is created (disallowed in update requests). Location *string `json:"location"` - // The size of the IP block. - Size *int32 `json:"size"` // The name of the resource. Name *string `json:"name,omitempty"` - // Read-Only attribute. Lists consumption detail for an individual IP - IpConsumers *[]IpConsumer `json:"ipConsumers,omitempty"` + // The size of the IP block. + Size *int32 `json:"size"` } // NewIpBlockProperties instantiates a new IpBlockProperties object @@ -49,114 +49,114 @@ func NewIpBlockPropertiesWithDefaults() *IpBlockProperties { return &this } -// GetIps returns the Ips field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *IpBlockProperties) GetIps() *[]string { +// GetIpConsumers returns the IpConsumers field value +// If the value is explicit nil, nil is returned +func (o *IpBlockProperties) GetIpConsumers() *[]IpConsumer { if o == nil { return nil } - return o.Ips + return o.IpConsumers } -// GetIpsOk returns a tuple with the Ips field value +// GetIpConsumersOk returns a tuple with the IpConsumers field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlockProperties) GetIpsOk() (*[]string, bool) { +func (o *IpBlockProperties) GetIpConsumersOk() (*[]IpConsumer, bool) { if o == nil { return nil, false } - return o.Ips, true + return o.IpConsumers, true } -// SetIps sets field value -func (o *IpBlockProperties) SetIps(v []string) { +// SetIpConsumers sets field value +func (o *IpBlockProperties) SetIpConsumers(v []IpConsumer) { - o.Ips = &v + o.IpConsumers = &v } -// HasIps returns a boolean if a field has been set. -func (o *IpBlockProperties) HasIps() bool { - if o != nil && o.Ips != nil { +// HasIpConsumers returns a boolean if a field has been set. +func (o *IpBlockProperties) HasIpConsumers() bool { + if o != nil && o.IpConsumers != nil { return true } return false } -// GetLocation returns the Location field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpBlockProperties) GetLocation() *string { +// GetIps returns the Ips field value +// If the value is explicit nil, nil is returned +func (o *IpBlockProperties) GetIps() *[]string { if o == nil { return nil } - return o.Location + return o.Ips } -// GetLocationOk returns a tuple with the Location field value +// GetIpsOk returns a tuple with the Ips field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlockProperties) GetLocationOk() (*string, bool) { +func (o *IpBlockProperties) GetIpsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Location, true + return o.Ips, true } -// SetLocation sets field value -func (o *IpBlockProperties) SetLocation(v string) { +// SetIps sets field value +func (o *IpBlockProperties) SetIps(v []string) { - o.Location = &v + o.Ips = &v } -// HasLocation returns a boolean if a field has been set. -func (o *IpBlockProperties) HasLocation() bool { - if o != nil && o.Location != nil { +// HasIps returns a boolean if a field has been set. +func (o *IpBlockProperties) HasIps() bool { + if o != nil && o.Ips != nil { return true } return false } -// GetSize returns the Size field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *IpBlockProperties) GetSize() *int32 { +// GetLocation returns the Location field value +// If the value is explicit nil, nil is returned +func (o *IpBlockProperties) GetLocation() *string { if o == nil { return nil } - return o.Size + return o.Location } -// GetSizeOk returns a tuple with the Size field value +// GetLocationOk returns a tuple with the Location field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlockProperties) GetSizeOk() (*int32, bool) { +func (o *IpBlockProperties) GetLocationOk() (*string, bool) { if o == nil { return nil, false } - return o.Size, true + return o.Location, true } -// SetSize sets field value -func (o *IpBlockProperties) SetSize(v int32) { +// SetLocation sets field value +func (o *IpBlockProperties) SetLocation(v string) { - o.Size = &v + o.Location = &v } -// HasSize returns a boolean if a field has been set. -func (o *IpBlockProperties) HasSize() bool { - if o != nil && o.Size != nil { +// HasLocation returns a boolean if a field has been set. 
+func (o *IpBlockProperties) HasLocation() bool { + if o != nil && o.Location != nil { return true } @@ -164,7 +164,7 @@ func (o *IpBlockProperties) HasSize() bool { } // GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *IpBlockProperties) GetName() *string { if o == nil { return nil @@ -201,38 +201,38 @@ func (o *IpBlockProperties) HasName() bool { return false } -// GetIpConsumers returns the IpConsumers field value -// If the value is explicit nil, the zero value for []IpConsumer will be returned -func (o *IpBlockProperties) GetIpConsumers() *[]IpConsumer { +// GetSize returns the Size field value +// If the value is explicit nil, nil is returned +func (o *IpBlockProperties) GetSize() *int32 { if o == nil { return nil } - return o.IpConsumers + return o.Size } -// GetIpConsumersOk returns a tuple with the IpConsumers field value +// GetSizeOk returns a tuple with the Size field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlockProperties) GetIpConsumersOk() (*[]IpConsumer, bool) { +func (o *IpBlockProperties) GetSizeOk() (*int32, bool) { if o == nil { return nil, false } - return o.IpConsumers, true + return o.Size, true } -// SetIpConsumers sets field value -func (o *IpBlockProperties) SetIpConsumers(v []IpConsumer) { +// SetSize sets field value +func (o *IpBlockProperties) SetSize(v int32) { - o.IpConsumers = &v + o.Size = &v } -// HasIpConsumers returns a boolean if a field has been set. -func (o *IpBlockProperties) HasIpConsumers() bool { - if o != nil && o.IpConsumers != nil { +// HasSize returns a boolean if a field has been set. +func (o *IpBlockProperties) HasSize() bool { + if o != nil && o.Size != nil { return true } @@ -241,21 +241,26 @@ func (o *IpBlockProperties) HasIpConsumers() bool { func (o IpBlockProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + if o.IpConsumers != nil { + toSerialize["ipConsumers"] = o.IpConsumers + } + if o.Ips != nil { toSerialize["ips"] = o.Ips } + if o.Location != nil { toSerialize["location"] = o.Location } - if o.Size != nil { - toSerialize["size"] = o.Size - } + if o.Name != nil { toSerialize["name"] = o.Name } - if o.IpConsumers != nil { - toSerialize["ipConsumers"] = o.IpConsumers + + if o.Size != nil { + toSerialize["size"] = o.Size } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_blocks.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_blocks.go index 010dc17fe..117235c82 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_blocks.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_blocks.go @@ -16,19 +16,19 @@ import ( // IpBlocks struct for IpBlocks type IpBlocks struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]IpBlock `json:"items,omitempty"` + // The limit, specified in the request (if not specified, the endpoint's default pagination limit is used). 
+ Limit *float32 `json:"limit,omitempty"` // The offset, specified in the request (if not is specified, 0 is used by default). Offset *float32 `json:"offset,omitempty"` - // The limit, specified in the request (if not specified, the endpoint's default pagination limit is used). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewIpBlocks instantiates a new IpBlocks object @@ -49,114 +49,114 @@ func NewIpBlocksWithDefaults() *IpBlocks { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpBlocks) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *IpBlocks) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlocks) GetIdOk() (*string, bool) { +func (o *IpBlocks) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *IpBlocks) SetId(v string) { +// SetLinks sets field value +func (o *IpBlocks) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *IpBlocks) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *IpBlocks) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *IpBlocks) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *IpBlocks) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlocks) GetTypeOk() (*Type, bool) { +func (o *IpBlocks) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *IpBlocks) SetType(v Type) { +// SetHref sets field value +func (o *IpBlocks) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *IpBlocks) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *IpBlocks) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpBlocks) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *IpBlocks) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlocks) GetHrefOk() (*string, bool) { +func (o *IpBlocks) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *IpBlocks) SetHref(v string) { +// SetId sets field value +func (o *IpBlocks) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *IpBlocks) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *IpBlocks) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *IpBlocks) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []IpBlock will be returned +// If the value is explicit nil, nil is returned func (o *IpBlocks) GetItems() *[]IpBlock { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *IpBlocks) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *IpBlocks) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *IpBlocks) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlocks) GetOffsetOk() (*float32, bool) { +func (o *IpBlocks) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *IpBlocks) SetOffset(v float32) { +// SetLimit sets field value +func (o *IpBlocks) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *IpBlocks) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *IpBlocks) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *IpBlocks) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *IpBlocks) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlocks) GetLimitOk() (*float32, bool) { +func (o *IpBlocks) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *IpBlocks) SetLimit(v float32) { +// SetOffset sets field value +func (o *IpBlocks) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *IpBlocks) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. 
+func (o *IpBlocks) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *IpBlocks) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *IpBlocks) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpBlocks) GetLinksOk() (*PaginationLinks, bool) { +func (o *IpBlocks) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *IpBlocks) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *IpBlocks) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *IpBlocks) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *IpBlocks) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *IpBlocks) HasLinks() bool { func (o IpBlocks) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_consumer.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_consumer.go index a0e9e55c6..e405979fe 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_consumer.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_consumer.go @@ -16,15 +16,15 @@ import ( // IpConsumer struct for IpConsumer type IpConsumer struct { + DatacenterId *string `json:"datacenterId,omitempty"` + DatacenterName *string `json:"datacenterName,omitempty"` Ip *string `json:"ip,omitempty"` + K8sClusterUuid *string `json:"k8sClusterUuid,omitempty"` + K8sNodePoolUuid *string `json:"k8sNodePoolUuid,omitempty"` Mac *string `json:"mac,omitempty"` NicId *string `json:"nicId,omitempty"` ServerId *string `json:"serverId,omitempty"` ServerName *string `json:"serverName,omitempty"` - DatacenterId *string `json:"datacenterId,omitempty"` - DatacenterName *string `json:"datacenterName,omitempty"` - K8sNodePoolUuid *string `json:"k8sNodePoolUuid,omitempty"` - K8sClusterUuid *string `json:"k8sClusterUuid,omitempty"` } // NewIpConsumer instantiates a new IpConsumer object @@ -45,342 +45,342 @@ func NewIpConsumerWithDefaults() *IpConsumer { return &this } -// GetIp returns the Ip field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpConsumer) GetIp() *string { +// GetDatacenterId returns the DatacenterId field 
value +// If the value is explicit nil, nil is returned +func (o *IpConsumer) GetDatacenterId() *string { if o == nil { return nil } - return o.Ip + return o.DatacenterId } -// GetIpOk returns a tuple with the Ip field value +// GetDatacenterIdOk returns a tuple with the DatacenterId field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpConsumer) GetIpOk() (*string, bool) { +func (o *IpConsumer) GetDatacenterIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Ip, true + return o.DatacenterId, true } -// SetIp sets field value -func (o *IpConsumer) SetIp(v string) { +// SetDatacenterId sets field value +func (o *IpConsumer) SetDatacenterId(v string) { - o.Ip = &v + o.DatacenterId = &v } -// HasIp returns a boolean if a field has been set. -func (o *IpConsumer) HasIp() bool { - if o != nil && o.Ip != nil { +// HasDatacenterId returns a boolean if a field has been set. +func (o *IpConsumer) HasDatacenterId() bool { + if o != nil && o.DatacenterId != nil { return true } return false } -// GetMac returns the Mac field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpConsumer) GetMac() *string { +// GetDatacenterName returns the DatacenterName field value +// If the value is explicit nil, nil is returned +func (o *IpConsumer) GetDatacenterName() *string { if o == nil { return nil } - return o.Mac + return o.DatacenterName } -// GetMacOk returns a tuple with the Mac field value +// GetDatacenterNameOk returns a tuple with the DatacenterName field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpConsumer) GetMacOk() (*string, bool) { +func (o *IpConsumer) GetDatacenterNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Mac, true + return o.DatacenterName, true } -// SetMac sets field value -func (o *IpConsumer) SetMac(v string) { +// SetDatacenterName sets field value +func (o *IpConsumer) SetDatacenterName(v string) { - o.Mac = &v + o.DatacenterName = &v } -// HasMac returns a boolean if a field has been set. -func (o *IpConsumer) HasMac() bool { - if o != nil && o.Mac != nil { +// HasDatacenterName returns a boolean if a field has been set. +func (o *IpConsumer) HasDatacenterName() bool { + if o != nil && o.DatacenterName != nil { return true } return false } -// GetNicId returns the NicId field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpConsumer) GetNicId() *string { +// GetIp returns the Ip field value +// If the value is explicit nil, nil is returned +func (o *IpConsumer) GetIp() *string { if o == nil { return nil } - return o.NicId + return o.Ip } -// GetNicIdOk returns a tuple with the NicId field value +// GetIpOk returns a tuple with the Ip field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpConsumer) GetNicIdOk() (*string, bool) { +func (o *IpConsumer) GetIpOk() (*string, bool) { if o == nil { return nil, false } - return o.NicId, true + return o.Ip, true } -// SetNicId sets field value -func (o *IpConsumer) SetNicId(v string) { +// SetIp sets field value +func (o *IpConsumer) SetIp(v string) { - o.NicId = &v + o.Ip = &v } -// HasNicId returns a boolean if a field has been set. 
-func (o *IpConsumer) HasNicId() bool { - if o != nil && o.NicId != nil { +// HasIp returns a boolean if a field has been set. +func (o *IpConsumer) HasIp() bool { + if o != nil && o.Ip != nil { return true } return false } -// GetServerId returns the ServerId field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpConsumer) GetServerId() *string { +// GetK8sClusterUuid returns the K8sClusterUuid field value +// If the value is explicit nil, nil is returned +func (o *IpConsumer) GetK8sClusterUuid() *string { if o == nil { return nil } - return o.ServerId + return o.K8sClusterUuid } -// GetServerIdOk returns a tuple with the ServerId field value +// GetK8sClusterUuidOk returns a tuple with the K8sClusterUuid field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpConsumer) GetServerIdOk() (*string, bool) { +func (o *IpConsumer) GetK8sClusterUuidOk() (*string, bool) { if o == nil { return nil, false } - return o.ServerId, true + return o.K8sClusterUuid, true } -// SetServerId sets field value -func (o *IpConsumer) SetServerId(v string) { +// SetK8sClusterUuid sets field value +func (o *IpConsumer) SetK8sClusterUuid(v string) { - o.ServerId = &v + o.K8sClusterUuid = &v } -// HasServerId returns a boolean if a field has been set. -func (o *IpConsumer) HasServerId() bool { - if o != nil && o.ServerId != nil { +// HasK8sClusterUuid returns a boolean if a field has been set. +func (o *IpConsumer) HasK8sClusterUuid() bool { + if o != nil && o.K8sClusterUuid != nil { return true } return false } -// GetServerName returns the ServerName field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpConsumer) GetServerName() *string { +// GetK8sNodePoolUuid returns the K8sNodePoolUuid field value +// If the value is explicit nil, nil is returned +func (o *IpConsumer) GetK8sNodePoolUuid() *string { if o == nil { return nil } - return o.ServerName + return o.K8sNodePoolUuid } -// GetServerNameOk returns a tuple with the ServerName field value +// GetK8sNodePoolUuidOk returns a tuple with the K8sNodePoolUuid field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpConsumer) GetServerNameOk() (*string, bool) { +func (o *IpConsumer) GetK8sNodePoolUuidOk() (*string, bool) { if o == nil { return nil, false } - return o.ServerName, true + return o.K8sNodePoolUuid, true } -// SetServerName sets field value -func (o *IpConsumer) SetServerName(v string) { +// SetK8sNodePoolUuid sets field value +func (o *IpConsumer) SetK8sNodePoolUuid(v string) { - o.ServerName = &v + o.K8sNodePoolUuid = &v } -// HasServerName returns a boolean if a field has been set. -func (o *IpConsumer) HasServerName() bool { - if o != nil && o.ServerName != nil { +// HasK8sNodePoolUuid returns a boolean if a field has been set. 
+func (o *IpConsumer) HasK8sNodePoolUuid() bool { + if o != nil && o.K8sNodePoolUuid != nil { return true } return false } -// GetDatacenterId returns the DatacenterId field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpConsumer) GetDatacenterId() *string { +// GetMac returns the Mac field value +// If the value is explicit nil, nil is returned +func (o *IpConsumer) GetMac() *string { if o == nil { return nil } - return o.DatacenterId + return o.Mac } -// GetDatacenterIdOk returns a tuple with the DatacenterId field value +// GetMacOk returns a tuple with the Mac field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpConsumer) GetDatacenterIdOk() (*string, bool) { +func (o *IpConsumer) GetMacOk() (*string, bool) { if o == nil { return nil, false } - return o.DatacenterId, true + return o.Mac, true } -// SetDatacenterId sets field value -func (o *IpConsumer) SetDatacenterId(v string) { +// SetMac sets field value +func (o *IpConsumer) SetMac(v string) { - o.DatacenterId = &v + o.Mac = &v } -// HasDatacenterId returns a boolean if a field has been set. -func (o *IpConsumer) HasDatacenterId() bool { - if o != nil && o.DatacenterId != nil { +// HasMac returns a boolean if a field has been set. +func (o *IpConsumer) HasMac() bool { + if o != nil && o.Mac != nil { return true } return false } -// GetDatacenterName returns the DatacenterName field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpConsumer) GetDatacenterName() *string { +// GetNicId returns the NicId field value +// If the value is explicit nil, nil is returned +func (o *IpConsumer) GetNicId() *string { if o == nil { return nil } - return o.DatacenterName + return o.NicId } -// GetDatacenterNameOk returns a tuple with the DatacenterName field value +// GetNicIdOk returns a tuple with the NicId field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpConsumer) GetDatacenterNameOk() (*string, bool) { +func (o *IpConsumer) GetNicIdOk() (*string, bool) { if o == nil { return nil, false } - return o.DatacenterName, true + return o.NicId, true } -// SetDatacenterName sets field value -func (o *IpConsumer) SetDatacenterName(v string) { +// SetNicId sets field value +func (o *IpConsumer) SetNicId(v string) { - o.DatacenterName = &v + o.NicId = &v } -// HasDatacenterName returns a boolean if a field has been set. -func (o *IpConsumer) HasDatacenterName() bool { - if o != nil && o.DatacenterName != nil { +// HasNicId returns a boolean if a field has been set. +func (o *IpConsumer) HasNicId() bool { + if o != nil && o.NicId != nil { return true } return false } -// GetK8sNodePoolUuid returns the K8sNodePoolUuid field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpConsumer) GetK8sNodePoolUuid() *string { +// GetServerId returns the ServerId field value +// If the value is explicit nil, nil is returned +func (o *IpConsumer) GetServerId() *string { if o == nil { return nil } - return o.K8sNodePoolUuid + return o.ServerId } -// GetK8sNodePoolUuidOk returns a tuple with the K8sNodePoolUuid field value +// GetServerIdOk returns a tuple with the ServerId field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpConsumer) GetK8sNodePoolUuidOk() (*string, bool) { +func (o *IpConsumer) GetServerIdOk() (*string, bool) { if o == nil { return nil, false } - return o.K8sNodePoolUuid, true + return o.ServerId, true } -// SetK8sNodePoolUuid sets field value -func (o *IpConsumer) SetK8sNodePoolUuid(v string) { +// SetServerId sets field value +func (o *IpConsumer) SetServerId(v string) { - o.K8sNodePoolUuid = &v + o.ServerId = &v } -// HasK8sNodePoolUuid returns a boolean if a field has been set. -func (o *IpConsumer) HasK8sNodePoolUuid() bool { - if o != nil && o.K8sNodePoolUuid != nil { +// HasServerId returns a boolean if a field has been set. +func (o *IpConsumer) HasServerId() bool { + if o != nil && o.ServerId != nil { return true } return false } -// GetK8sClusterUuid returns the K8sClusterUuid field value -// If the value is explicit nil, the zero value for string will be returned -func (o *IpConsumer) GetK8sClusterUuid() *string { +// GetServerName returns the ServerName field value +// If the value is explicit nil, nil is returned +func (o *IpConsumer) GetServerName() *string { if o == nil { return nil } - return o.K8sClusterUuid + return o.ServerName } -// GetK8sClusterUuidOk returns a tuple with the K8sClusterUuid field value +// GetServerNameOk returns a tuple with the ServerName field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *IpConsumer) GetK8sClusterUuidOk() (*string, bool) { +func (o *IpConsumer) GetServerNameOk() (*string, bool) { if o == nil { return nil, false } - return o.K8sClusterUuid, true + return o.ServerName, true } -// SetK8sClusterUuid sets field value -func (o *IpConsumer) SetK8sClusterUuid(v string) { +// SetServerName sets field value +func (o *IpConsumer) SetServerName(v string) { - o.K8sClusterUuid = &v + o.ServerName = &v } -// HasK8sClusterUuid returns a boolean if a field has been set. -func (o *IpConsumer) HasK8sClusterUuid() bool { - if o != nil && o.K8sClusterUuid != nil { +// HasServerName returns a boolean if a field has been set. 
+func (o *IpConsumer) HasServerName() bool { + if o != nil && o.ServerName != nil { return true } @@ -389,33 +389,42 @@ func (o *IpConsumer) HasK8sClusterUuid() bool { func (o IpConsumer) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + if o.DatacenterId != nil { + toSerialize["datacenterId"] = o.DatacenterId + } + + if o.DatacenterName != nil { + toSerialize["datacenterName"] = o.DatacenterName + } + if o.Ip != nil { toSerialize["ip"] = o.Ip } + + if o.K8sClusterUuid != nil { + toSerialize["k8sClusterUuid"] = o.K8sClusterUuid + } + + if o.K8sNodePoolUuid != nil { + toSerialize["k8sNodePoolUuid"] = o.K8sNodePoolUuid + } + if o.Mac != nil { toSerialize["mac"] = o.Mac } + if o.NicId != nil { toSerialize["nicId"] = o.NicId } + if o.ServerId != nil { toSerialize["serverId"] = o.ServerId } + if o.ServerName != nil { toSerialize["serverName"] = o.ServerName } - if o.DatacenterId != nil { - toSerialize["datacenterId"] = o.DatacenterId - } - if o.DatacenterName != nil { - toSerialize["datacenterName"] = o.DatacenterName - } - if o.K8sNodePoolUuid != nil { - toSerialize["k8sNodePoolUuid"] = o.K8sNodePoolUuid - } - if o.K8sClusterUuid != nil { - toSerialize["k8sClusterUuid"] = o.K8sClusterUuid - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_failover.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_failover.go index 16677e73e..60c902a97 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_failover.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_ip_failover.go @@ -39,7 +39,7 @@ func NewIPFailoverWithDefaults() *IPFailover { } // GetIp returns the Ip field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *IPFailover) GetIp() *string { if o == nil { return nil @@ -77,7 +77,7 @@ func (o *IPFailover) HasIp() bool { } // GetNicUuid returns the NicUuid field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *IPFailover) GetNicUuid() *string { if o == nil { return nil @@ -119,9 +119,11 @@ func (o IPFailover) MarshalJSON() ([]byte, error) { if o.Ip != nil { toSerialize["ip"] = o.Ip } + if o.NicUuid != nil { toSerialize["nicUuid"] = o.NicUuid } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_auto_scaling.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_auto_scaling.go index 24f95d7b3..49d9b3e86 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_auto_scaling.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_auto_scaling.go @@ -16,21 +16,21 @@ import ( // KubernetesAutoScaling struct for KubernetesAutoScaling type KubernetesAutoScaling struct { - // The minimum number of working nodes that the managed node pool can scale must be >= 1 and >= nodeCount. Required if autoScaling is specified. - MinNodeCount *int32 `json:"minNodeCount"` // The maximum number of worker nodes that the managed node pool can scale in. Must be >= minNodeCount and must be >= nodeCount. Required if autoScaling is specified. MaxNodeCount *int32 `json:"maxNodeCount"` + // The minimum number of working nodes that the managed node pool can scale must be >= 1 and >= nodeCount. Required if autoScaling is specified. 
+ MinNodeCount *int32 `json:"minNodeCount"` } // NewKubernetesAutoScaling instantiates a new KubernetesAutoScaling object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewKubernetesAutoScaling(minNodeCount int32, maxNodeCount int32) *KubernetesAutoScaling { +func NewKubernetesAutoScaling(maxNodeCount int32, minNodeCount int32) *KubernetesAutoScaling { this := KubernetesAutoScaling{} - this.MinNodeCount = &minNodeCount this.MaxNodeCount = &maxNodeCount + this.MinNodeCount = &minNodeCount return &this } @@ -43,76 +43,76 @@ func NewKubernetesAutoScalingWithDefaults() *KubernetesAutoScaling { return &this } -// GetMinNodeCount returns the MinNodeCount field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesAutoScaling) GetMinNodeCount() *int32 { +// GetMaxNodeCount returns the MaxNodeCount field value +// If the value is explicit nil, nil is returned +func (o *KubernetesAutoScaling) GetMaxNodeCount() *int32 { if o == nil { return nil } - return o.MinNodeCount + return o.MaxNodeCount } -// GetMinNodeCountOk returns a tuple with the MinNodeCount field value +// GetMaxNodeCountOk returns a tuple with the MaxNodeCount field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesAutoScaling) GetMinNodeCountOk() (*int32, bool) { +func (o *KubernetesAutoScaling) GetMaxNodeCountOk() (*int32, bool) { if o == nil { return nil, false } - return o.MinNodeCount, true + return o.MaxNodeCount, true } -// SetMinNodeCount sets field value -func (o *KubernetesAutoScaling) SetMinNodeCount(v int32) { +// SetMaxNodeCount sets field value +func (o *KubernetesAutoScaling) SetMaxNodeCount(v int32) { - o.MinNodeCount = &v + o.MaxNodeCount = &v } -// HasMinNodeCount returns a boolean if a field has been set. -func (o *KubernetesAutoScaling) HasMinNodeCount() bool { - if o != nil && o.MinNodeCount != nil { +// HasMaxNodeCount returns a boolean if a field has been set. +func (o *KubernetesAutoScaling) HasMaxNodeCount() bool { + if o != nil && o.MaxNodeCount != nil { return true } return false } -// GetMaxNodeCount returns the MaxNodeCount field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesAutoScaling) GetMaxNodeCount() *int32 { +// GetMinNodeCount returns the MinNodeCount field value +// If the value is explicit nil, nil is returned +func (o *KubernetesAutoScaling) GetMinNodeCount() *int32 { if o == nil { return nil } - return o.MaxNodeCount + return o.MinNodeCount } -// GetMaxNodeCountOk returns a tuple with the MaxNodeCount field value +// GetMinNodeCountOk returns a tuple with the MinNodeCount field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesAutoScaling) GetMaxNodeCountOk() (*int32, bool) { +func (o *KubernetesAutoScaling) GetMinNodeCountOk() (*int32, bool) { if o == nil { return nil, false } - return o.MaxNodeCount, true + return o.MinNodeCount, true } -// SetMaxNodeCount sets field value -func (o *KubernetesAutoScaling) SetMaxNodeCount(v int32) { +// SetMinNodeCount sets field value +func (o *KubernetesAutoScaling) SetMinNodeCount(v int32) { - o.MaxNodeCount = &v + o.MinNodeCount = &v } -// HasMaxNodeCount returns a boolean if a field has been set. -func (o *KubernetesAutoScaling) HasMaxNodeCount() bool { - if o != nil && o.MaxNodeCount != nil { +// HasMinNodeCount returns a boolean if a field has been set. +func (o *KubernetesAutoScaling) HasMinNodeCount() bool { + if o != nil && o.MinNodeCount != nil { return true } @@ -121,12 +121,14 @@ func (o *KubernetesAutoScaling) HasMaxNodeCount() bool { func (o KubernetesAutoScaling) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.MinNodeCount != nil { - toSerialize["minNodeCount"] = o.MinNodeCount - } if o.MaxNodeCount != nil { toSerialize["maxNodeCount"] = o.MaxNodeCount } + + if o.MinNodeCount != nil { + toSerialize["minNodeCount"] = o.MinNodeCount + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster.go index 631bda922..994a6ac28 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster.go @@ -16,15 +16,15 @@ import ( // KubernetesCluster struct for KubernetesCluster type KubernetesCluster struct { - // The resource unique identifier. - Id *string `json:"id,omitempty"` - // The object type. - Type *string `json:"type,omitempty"` + Entities *KubernetesClusterEntities `json:"entities,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesClusterProperties `json:"properties"` - Entities *KubernetesClusterEntities `json:"entities,omitempty"` + // The object type. + Type *string `json:"type,omitempty"` } // NewKubernetesCluster instantiates a new KubernetesCluster object @@ -47,114 +47,114 @@ func NewKubernetesClusterWithDefaults() *KubernetesCluster { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesCluster) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *KubernetesCluster) GetEntities() *KubernetesClusterEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesCluster) GetIdOk() (*string, bool) { +func (o *KubernetesCluster) GetEntitiesOk() (*KubernetesClusterEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *KubernetesCluster) SetId(v string) { +// SetEntities sets field value +func (o *KubernetesCluster) SetEntities(v KubernetesClusterEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesCluster) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *KubernetesCluster) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesCluster) GetType() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesCluster) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesCluster) GetTypeOk() (*string, bool) { +func (o *KubernetesCluster) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *KubernetesCluster) SetType(v string) { +// SetHref sets field value +func (o *KubernetesCluster) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesCluster) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *KubernetesCluster) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesCluster) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesCluster) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesCluster) GetHrefOk() (*string, bool) { +func (o *KubernetesCluster) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *KubernetesCluster) SetHref(v string) { +// SetId sets field value +func (o *KubernetesCluster) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesCluster) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *KubernetesCluster) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *KubernetesCluster) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesCluster) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *KubernetesCluster) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for KubernetesClusterProperties will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesCluster) GetProperties() *KubernetesClusterProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *KubernetesCluster) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for KubernetesClusterEntities will be returned -func (o *KubernetesCluster) GetEntities() *KubernetesClusterEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesCluster) GetType() *string { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesCluster) GetEntitiesOk() (*KubernetesClusterEntities, bool) { +func (o *KubernetesCluster) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *KubernetesCluster) SetEntities(v KubernetesClusterEntities) { +// SetType sets field value +func (o *KubernetesCluster) SetType(v string) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *KubernetesCluster) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *KubernetesCluster) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *KubernetesCluster) HasEntities() bool { func (o KubernetesCluster) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_entities.go index 3e5dbaf4d..0e7daca34 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_entities.go @@ -38,7 +38,7 @@ func NewKubernetesClusterEntitiesWithDefaults() *KubernetesClusterEntities { } // GetNodepools returns the Nodepools field value -// If the value is explicit nil, the zero value for KubernetesNodePools will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterEntities) GetNodepools() *KubernetesNodePools { if o == nil { return nil @@ -80,6 +80,7 @@ func (o KubernetesClusterEntities) MarshalJSON() ([]byte, error) { if o.Nodepools != nil { toSerialize["nodepools"] = o.Nodepools } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_post.go index 1fbe331ad..cf2d02e62 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_post.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_post.go @@ -16,15 +16,15 @@ import ( // KubernetesClusterForPost struct for KubernetesClusterForPost type KubernetesClusterForPost struct { - // The resource unique identifier. - Id *string `json:"id,omitempty"` - // The object type. - Type *string `json:"type,omitempty"` + Entities *KubernetesClusterEntities `json:"entities,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesClusterPropertiesForPost `json:"properties"` - Entities *KubernetesClusterEntities `json:"entities,omitempty"` + // The object type. 
+ Type *string `json:"type,omitempty"` } // NewKubernetesClusterForPost instantiates a new KubernetesClusterForPost object @@ -47,114 +47,114 @@ func NewKubernetesClusterForPostWithDefaults() *KubernetesClusterForPost { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterForPost) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterForPost) GetEntities() *KubernetesClusterEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterForPost) GetIdOk() (*string, bool) { +func (o *KubernetesClusterForPost) GetEntitiesOk() (*KubernetesClusterEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *KubernetesClusterForPost) SetId(v string) { +// SetEntities sets field value +func (o *KubernetesClusterForPost) SetEntities(v KubernetesClusterEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesClusterForPost) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *KubernetesClusterForPost) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterForPost) GetType() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterForPost) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterForPost) GetTypeOk() (*string, bool) { +func (o *KubernetesClusterForPost) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *KubernetesClusterForPost) SetType(v string) { +// SetHref sets field value +func (o *KubernetesClusterForPost) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesClusterForPost) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *KubernetesClusterForPost) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterForPost) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterForPost) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterForPost) GetHrefOk() (*string, bool) { +func (o *KubernetesClusterForPost) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *KubernetesClusterForPost) SetHref(v string) { +// SetId sets field value +func (o *KubernetesClusterForPost) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesClusterForPost) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *KubernetesClusterForPost) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *KubernetesClusterForPost) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterForPost) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *KubernetesClusterForPost) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for KubernetesClusterPropertiesForPost will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterForPost) GetProperties() *KubernetesClusterPropertiesForPost { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *KubernetesClusterForPost) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for KubernetesClusterEntities will be returned -func (o *KubernetesClusterForPost) GetEntities() *KubernetesClusterEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterForPost) GetType() *string { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterForPost) GetEntitiesOk() (*KubernetesClusterEntities, bool) { +func (o *KubernetesClusterForPost) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *KubernetesClusterForPost) SetEntities(v KubernetesClusterEntities) { +// SetType sets field value +func (o *KubernetesClusterForPost) SetType(v string) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *KubernetesClusterForPost) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *KubernetesClusterForPost) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *KubernetesClusterForPost) HasEntities() bool { func (o KubernetesClusterForPost) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_put.go index 2873bf5c8..6edf7ca5f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_put.go @@ -16,15 +16,15 @@ import ( // KubernetesClusterForPut struct for KubernetesClusterForPut type KubernetesClusterForPut struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object. - Type *string `json:"type,omitempty"` + Entities *KubernetesClusterEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesClusterPropertiesForPut `json:"properties"` - Entities *KubernetesClusterEntities `json:"entities,omitempty"` + // The type of object. + Type *string `json:"type,omitempty"` } // NewKubernetesClusterForPut instantiates a new KubernetesClusterForPut object @@ -47,114 +47,114 @@ func NewKubernetesClusterForPutWithDefaults() *KubernetesClusterForPut { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterForPut) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterForPut) GetEntities() *KubernetesClusterEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterForPut) GetIdOk() (*string, bool) { +func (o *KubernetesClusterForPut) GetEntitiesOk() (*KubernetesClusterEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *KubernetesClusterForPut) SetId(v string) { +// SetEntities sets field value +func (o *KubernetesClusterForPut) SetEntities(v KubernetesClusterEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesClusterForPut) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. 
+func (o *KubernetesClusterForPut) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterForPut) GetType() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterForPut) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterForPut) GetTypeOk() (*string, bool) { +func (o *KubernetesClusterForPut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *KubernetesClusterForPut) SetType(v string) { +// SetHref sets field value +func (o *KubernetesClusterForPut) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesClusterForPut) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *KubernetesClusterForPut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterForPut) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterForPut) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterForPut) GetHrefOk() (*string, bool) { +func (o *KubernetesClusterForPut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *KubernetesClusterForPut) SetHref(v string) { +// SetId sets field value +func (o *KubernetesClusterForPut) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesClusterForPut) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *KubernetesClusterForPut) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *KubernetesClusterForPut) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterForPut) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *KubernetesClusterForPut) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for KubernetesClusterPropertiesForPut will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterForPut) GetProperties() *KubernetesClusterPropertiesForPut { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *KubernetesClusterForPut) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for KubernetesClusterEntities will be returned -func (o *KubernetesClusterForPut) GetEntities() *KubernetesClusterEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterForPut) GetType() *string { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterForPut) GetEntitiesOk() (*KubernetesClusterEntities, bool) { +func (o *KubernetesClusterForPut) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *KubernetesClusterForPut) SetEntities(v KubernetesClusterEntities) { +// SetType sets field value +func (o *KubernetesClusterForPut) SetType(v string) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *KubernetesClusterForPut) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *KubernetesClusterForPut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *KubernetesClusterForPut) HasEntities() bool { func (o KubernetesClusterForPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties.go index d88717362..66956d3f7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties.go @@ -16,19 +16,19 @@ import ( // KubernetesClusterProperties struct for KubernetesClusterProperties type KubernetesClusterProperties struct { - // A Kubernetes cluster name. Valid Kubernetes cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. - Name *string `json:"name"` + // Access to the K8s API server is restricted to these CIDRs. Traffic, internal to the cluster, is not affected by this restriction. If no allowlist is specified, access is not restricted. If an IP without subnet mask is provided, the default value is used: 32 for IPv4 and 128 for IPv6. + ApiSubnetAllowList *[]string `json:"apiSubnetAllowList,omitempty"` + // List of available versions for upgrading the cluster + AvailableUpgradeVersions *[]string `json:"availableUpgradeVersions,omitempty"` // The Kubernetes version the cluster is running. This imposes restrictions on what Kubernetes versions can be run in a cluster's nodepools. Additionally, not all Kubernetes versions are viable upgrade targets for all prior versions. K8sVersion *string `json:"k8sVersion,omitempty"` MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` - // List of available versions for upgrading the cluster - AvailableUpgradeVersions *[]string `json:"availableUpgradeVersions,omitempty"` - // List of versions that may be used for node pools under this cluster - ViableNodePoolVersions *[]string `json:"viableNodePoolVersions,omitempty"` - // Access to the K8s API server is restricted to these CIDRs. Traffic, internal to the cluster, is not affected by this restriction. If no allowlist is specified, access is not restricted. If an IP without subnet mask is provided, the default value is used: 32 for IPv4 and 128 for IPv6. - ApiSubnetAllowList *[]string `json:"apiSubnetAllowList,omitempty"` + // A Kubernetes cluster name. Valid Kubernetes cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + Name *string `json:"name"` // List of S3 bucket configured for K8s usage. 
For now it contains only an S3 bucket used to store K8s API audit logs S3Buckets *[]S3Bucket `json:"s3Buckets,omitempty"` + // List of versions that may be used for node pools under this cluster + ViableNodePoolVersions *[]string `json:"viableNodePoolVersions,omitempty"` } // NewKubernetesClusterProperties instantiates a new KubernetesClusterProperties object @@ -51,266 +51,266 @@ func NewKubernetesClusterPropertiesWithDefaults() *KubernetesClusterProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterProperties) GetName() *string { +// GetApiSubnetAllowList returns the ApiSubnetAllowList field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterProperties) GetApiSubnetAllowList() *[]string { if o == nil { return nil } - return o.Name + return o.ApiSubnetAllowList } -// GetNameOk returns a tuple with the Name field value +// GetApiSubnetAllowListOk returns a tuple with the ApiSubnetAllowList field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterProperties) GetNameOk() (*string, bool) { +func (o *KubernetesClusterProperties) GetApiSubnetAllowListOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.ApiSubnetAllowList, true } -// SetName sets field value -func (o *KubernetesClusterProperties) SetName(v string) { +// SetApiSubnetAllowList sets field value +func (o *KubernetesClusterProperties) SetApiSubnetAllowList(v []string) { - o.Name = &v + o.ApiSubnetAllowList = &v } -// HasName returns a boolean if a field has been set. -func (o *KubernetesClusterProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasApiSubnetAllowList returns a boolean if a field has been set. +func (o *KubernetesClusterProperties) HasApiSubnetAllowList() bool { + if o != nil && o.ApiSubnetAllowList != nil { return true } return false } -// GetK8sVersion returns the K8sVersion field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterProperties) GetK8sVersion() *string { +// GetAvailableUpgradeVersions returns the AvailableUpgradeVersions field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterProperties) GetAvailableUpgradeVersions() *[]string { if o == nil { return nil } - return o.K8sVersion + return o.AvailableUpgradeVersions } -// GetK8sVersionOk returns a tuple with the K8sVersion field value +// GetAvailableUpgradeVersionsOk returns a tuple with the AvailableUpgradeVersions field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterProperties) GetK8sVersionOk() (*string, bool) { +func (o *KubernetesClusterProperties) GetAvailableUpgradeVersionsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.K8sVersion, true + return o.AvailableUpgradeVersions, true } -// SetK8sVersion sets field value -func (o *KubernetesClusterProperties) SetK8sVersion(v string) { +// SetAvailableUpgradeVersions sets field value +func (o *KubernetesClusterProperties) SetAvailableUpgradeVersions(v []string) { - o.K8sVersion = &v + o.AvailableUpgradeVersions = &v } -// HasK8sVersion returns a boolean if a field has been set. 
-func (o *KubernetesClusterProperties) HasK8sVersion() bool { - if o != nil && o.K8sVersion != nil { +// HasAvailableUpgradeVersions returns a boolean if a field has been set. +func (o *KubernetesClusterProperties) HasAvailableUpgradeVersions() bool { + if o != nil && o.AvailableUpgradeVersions != nil { return true } return false } -// GetMaintenanceWindow returns the MaintenanceWindow field value -// If the value is explicit nil, the zero value for KubernetesMaintenanceWindow will be returned -func (o *KubernetesClusterProperties) GetMaintenanceWindow() *KubernetesMaintenanceWindow { +// GetK8sVersion returns the K8sVersion field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterProperties) GetK8sVersion() *string { if o == nil { return nil } - return o.MaintenanceWindow + return o.K8sVersion } -// GetMaintenanceWindowOk returns a tuple with the MaintenanceWindow field value +// GetK8sVersionOk returns a tuple with the K8sVersion field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterProperties) GetMaintenanceWindowOk() (*KubernetesMaintenanceWindow, bool) { +func (o *KubernetesClusterProperties) GetK8sVersionOk() (*string, bool) { if o == nil { return nil, false } - return o.MaintenanceWindow, true + return o.K8sVersion, true } -// SetMaintenanceWindow sets field value -func (o *KubernetesClusterProperties) SetMaintenanceWindow(v KubernetesMaintenanceWindow) { +// SetK8sVersion sets field value +func (o *KubernetesClusterProperties) SetK8sVersion(v string) { - o.MaintenanceWindow = &v + o.K8sVersion = &v } -// HasMaintenanceWindow returns a boolean if a field has been set. -func (o *KubernetesClusterProperties) HasMaintenanceWindow() bool { - if o != nil && o.MaintenanceWindow != nil { +// HasK8sVersion returns a boolean if a field has been set. +func (o *KubernetesClusterProperties) HasK8sVersion() bool { + if o != nil && o.K8sVersion != nil { return true } return false } -// GetAvailableUpgradeVersions returns the AvailableUpgradeVersions field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *KubernetesClusterProperties) GetAvailableUpgradeVersions() *[]string { +// GetMaintenanceWindow returns the MaintenanceWindow field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterProperties) GetMaintenanceWindow() *KubernetesMaintenanceWindow { if o == nil { return nil } - return o.AvailableUpgradeVersions + return o.MaintenanceWindow } -// GetAvailableUpgradeVersionsOk returns a tuple with the AvailableUpgradeVersions field value +// GetMaintenanceWindowOk returns a tuple with the MaintenanceWindow field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterProperties) GetAvailableUpgradeVersionsOk() (*[]string, bool) { +func (o *KubernetesClusterProperties) GetMaintenanceWindowOk() (*KubernetesMaintenanceWindow, bool) { if o == nil { return nil, false } - return o.AvailableUpgradeVersions, true + return o.MaintenanceWindow, true } -// SetAvailableUpgradeVersions sets field value -func (o *KubernetesClusterProperties) SetAvailableUpgradeVersions(v []string) { +// SetMaintenanceWindow sets field value +func (o *KubernetesClusterProperties) SetMaintenanceWindow(v KubernetesMaintenanceWindow) { - o.AvailableUpgradeVersions = &v + o.MaintenanceWindow = &v } -// HasAvailableUpgradeVersions returns a boolean if a field has been set. -func (o *KubernetesClusterProperties) HasAvailableUpgradeVersions() bool { - if o != nil && o.AvailableUpgradeVersions != nil { +// HasMaintenanceWindow returns a boolean if a field has been set. +func (o *KubernetesClusterProperties) HasMaintenanceWindow() bool { + if o != nil && o.MaintenanceWindow != nil { return true } return false } -// GetViableNodePoolVersions returns the ViableNodePoolVersions field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *KubernetesClusterProperties) GetViableNodePoolVersions() *[]string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterProperties) GetName() *string { if o == nil { return nil } - return o.ViableNodePoolVersions + return o.Name } -// GetViableNodePoolVersionsOk returns a tuple with the ViableNodePoolVersions field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterProperties) GetViableNodePoolVersionsOk() (*[]string, bool) { +func (o *KubernetesClusterProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.ViableNodePoolVersions, true + return o.Name, true } -// SetViableNodePoolVersions sets field value -func (o *KubernetesClusterProperties) SetViableNodePoolVersions(v []string) { +// SetName sets field value +func (o *KubernetesClusterProperties) SetName(v string) { - o.ViableNodePoolVersions = &v + o.Name = &v } -// HasViableNodePoolVersions returns a boolean if a field has been set. -func (o *KubernetesClusterProperties) HasViableNodePoolVersions() bool { - if o != nil && o.ViableNodePoolVersions != nil { +// HasName returns a boolean if a field has been set. +func (o *KubernetesClusterProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetApiSubnetAllowList returns the ApiSubnetAllowList field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *KubernetesClusterProperties) GetApiSubnetAllowList() *[]string { +// GetS3Buckets returns the S3Buckets field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterProperties) GetS3Buckets() *[]S3Bucket { if o == nil { return nil } - return o.ApiSubnetAllowList + return o.S3Buckets } -// GetApiSubnetAllowListOk returns a tuple with the ApiSubnetAllowList field value +// GetS3BucketsOk returns a tuple with the S3Buckets field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterProperties) GetApiSubnetAllowListOk() (*[]string, bool) { +func (o *KubernetesClusterProperties) GetS3BucketsOk() (*[]S3Bucket, bool) { if o == nil { return nil, false } - return o.ApiSubnetAllowList, true + return o.S3Buckets, true } -// SetApiSubnetAllowList sets field value -func (o *KubernetesClusterProperties) SetApiSubnetAllowList(v []string) { +// SetS3Buckets sets field value +func (o *KubernetesClusterProperties) SetS3Buckets(v []S3Bucket) { - o.ApiSubnetAllowList = &v + o.S3Buckets = &v } -// HasApiSubnetAllowList returns a boolean if a field has been set. -func (o *KubernetesClusterProperties) HasApiSubnetAllowList() bool { - if o != nil && o.ApiSubnetAllowList != nil { +// HasS3Buckets returns a boolean if a field has been set. +func (o *KubernetesClusterProperties) HasS3Buckets() bool { + if o != nil && o.S3Buckets != nil { return true } return false } -// GetS3Buckets returns the S3Buckets field value -// If the value is explicit nil, the zero value for []S3Bucket will be returned -func (o *KubernetesClusterProperties) GetS3Buckets() *[]S3Bucket { +// GetViableNodePoolVersions returns the ViableNodePoolVersions field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterProperties) GetViableNodePoolVersions() *[]string { if o == nil { return nil } - return o.S3Buckets + return o.ViableNodePoolVersions } -// GetS3BucketsOk returns a tuple with the S3Buckets field value +// GetViableNodePoolVersionsOk returns a tuple with the ViableNodePoolVersions field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterProperties) GetS3BucketsOk() (*[]S3Bucket, bool) { +func (o *KubernetesClusterProperties) GetViableNodePoolVersionsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.S3Buckets, true + return o.ViableNodePoolVersions, true } -// SetS3Buckets sets field value -func (o *KubernetesClusterProperties) SetS3Buckets(v []S3Bucket) { +// SetViableNodePoolVersions sets field value +func (o *KubernetesClusterProperties) SetViableNodePoolVersions(v []string) { - o.S3Buckets = &v + o.ViableNodePoolVersions = &v } -// HasS3Buckets returns a boolean if a field has been set. -func (o *KubernetesClusterProperties) HasS3Buckets() bool { - if o != nil && o.S3Buckets != nil { +// HasViableNodePoolVersions returns a boolean if a field has been set. 
+func (o *KubernetesClusterProperties) HasViableNodePoolVersions() bool { + if o != nil && o.ViableNodePoolVersions != nil { return true } @@ -319,27 +319,34 @@ func (o *KubernetesClusterProperties) HasS3Buckets() bool { func (o KubernetesClusterProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.ApiSubnetAllowList != nil { + toSerialize["apiSubnetAllowList"] = o.ApiSubnetAllowList + } + + if o.AvailableUpgradeVersions != nil { + toSerialize["availableUpgradeVersions"] = o.AvailableUpgradeVersions } + if o.K8sVersion != nil { toSerialize["k8sVersion"] = o.K8sVersion } + if o.MaintenanceWindow != nil { toSerialize["maintenanceWindow"] = o.MaintenanceWindow } - if o.AvailableUpgradeVersions != nil { - toSerialize["availableUpgradeVersions"] = o.AvailableUpgradeVersions - } - if o.ViableNodePoolVersions != nil { - toSerialize["viableNodePoolVersions"] = o.ViableNodePoolVersions - } - if o.ApiSubnetAllowList != nil { - toSerialize["apiSubnetAllowList"] = o.ApiSubnetAllowList + + if o.Name != nil { + toSerialize["name"] = o.Name } + if o.S3Buckets != nil { toSerialize["s3Buckets"] = o.S3Buckets } + + if o.ViableNodePoolVersions != nil { + toSerialize["viableNodePoolVersions"] = o.ViableNodePoolVersions + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_post.go index b44a3227a..9215373b9 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_post.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_post.go @@ -16,13 +16,13 @@ import ( // KubernetesClusterPropertiesForPost struct for KubernetesClusterPropertiesForPost type KubernetesClusterPropertiesForPost struct { - // A Kubernetes cluster name. Valid Kubernetes cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. - Name *string `json:"name"` + // Access to the K8s API server is restricted to these CIDRs. Intra-cluster traffic is not affected by this restriction. If no AllowList is specified, access is not limited. If an IP is specified without a subnet mask, the default value is 32 for IPv4 and 128 for IPv6. + ApiSubnetAllowList *[]string `json:"apiSubnetAllowList,omitempty"` // The Kubernetes version that the cluster is running. This limits which Kubernetes versions can run in a cluster's node pools. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. K8sVersion *string `json:"k8sVersion,omitempty"` MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` - // Access to the K8s API server is restricted to these CIDRs. Intra-cluster traffic is not affected by this restriction. If no AllowList is specified, access is not limited. If an IP is specified without a subnet mask, the default value is 32 for IPv4 and 128 for IPv6. - ApiSubnetAllowList *[]string `json:"apiSubnetAllowList,omitempty"` + // A Kubernetes cluster name. Valid Kubernetes cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + Name *string `json:"name"` // List of S3 buckets configured for K8s usage. 
At the moment, it contains only one S3 bucket that is used to store K8s API audit logs. S3Buckets *[]S3Bucket `json:"s3Buckets,omitempty"` } @@ -47,38 +47,38 @@ func NewKubernetesClusterPropertiesForPostWithDefaults() *KubernetesClusterPrope return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterPropertiesForPost) GetName() *string { +// GetApiSubnetAllowList returns the ApiSubnetAllowList field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterPropertiesForPost) GetApiSubnetAllowList() *[]string { if o == nil { return nil } - return o.Name + return o.ApiSubnetAllowList } -// GetNameOk returns a tuple with the Name field value +// GetApiSubnetAllowListOk returns a tuple with the ApiSubnetAllowList field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterPropertiesForPost) GetNameOk() (*string, bool) { +func (o *KubernetesClusterPropertiesForPost) GetApiSubnetAllowListOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.ApiSubnetAllowList, true } -// SetName sets field value -func (o *KubernetesClusterPropertiesForPost) SetName(v string) { +// SetApiSubnetAllowList sets field value +func (o *KubernetesClusterPropertiesForPost) SetApiSubnetAllowList(v []string) { - o.Name = &v + o.ApiSubnetAllowList = &v } -// HasName returns a boolean if a field has been set. -func (o *KubernetesClusterPropertiesForPost) HasName() bool { - if o != nil && o.Name != nil { +// HasApiSubnetAllowList returns a boolean if a field has been set. +func (o *KubernetesClusterPropertiesForPost) HasApiSubnetAllowList() bool { + if o != nil && o.ApiSubnetAllowList != nil { return true } @@ -86,7 +86,7 @@ func (o *KubernetesClusterPropertiesForPost) HasName() bool { } // GetK8sVersion returns the K8sVersion field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterPropertiesForPost) GetK8sVersion() *string { if o == nil { return nil @@ -124,7 +124,7 @@ func (o *KubernetesClusterPropertiesForPost) HasK8sVersion() bool { } // GetMaintenanceWindow returns the MaintenanceWindow field value -// If the value is explicit nil, the zero value for KubernetesMaintenanceWindow will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterPropertiesForPost) GetMaintenanceWindow() *KubernetesMaintenanceWindow { if o == nil { return nil @@ -161,38 +161,38 @@ func (o *KubernetesClusterPropertiesForPost) HasMaintenanceWindow() bool { return false } -// GetApiSubnetAllowList returns the ApiSubnetAllowList field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *KubernetesClusterPropertiesForPost) GetApiSubnetAllowList() *[]string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterPropertiesForPost) GetName() *string { if o == nil { return nil } - return o.ApiSubnetAllowList + return o.Name } -// GetApiSubnetAllowListOk returns a tuple with the ApiSubnetAllowList field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterPropertiesForPost) GetApiSubnetAllowListOk() (*[]string, bool) { +func (o *KubernetesClusterPropertiesForPost) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.ApiSubnetAllowList, true + return o.Name, true } -// SetApiSubnetAllowList sets field value -func (o *KubernetesClusterPropertiesForPost) SetApiSubnetAllowList(v []string) { +// SetName sets field value +func (o *KubernetesClusterPropertiesForPost) SetName(v string) { - o.ApiSubnetAllowList = &v + o.Name = &v } -// HasApiSubnetAllowList returns a boolean if a field has been set. -func (o *KubernetesClusterPropertiesForPost) HasApiSubnetAllowList() bool { - if o != nil && o.ApiSubnetAllowList != nil { +// HasName returns a boolean if a field has been set. +func (o *KubernetesClusterPropertiesForPost) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -200,7 +200,7 @@ func (o *KubernetesClusterPropertiesForPost) HasApiSubnetAllowList() bool { } // GetS3Buckets returns the S3Buckets field value -// If the value is explicit nil, the zero value for []S3Bucket will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterPropertiesForPost) GetS3Buckets() *[]S3Bucket { if o == nil { return nil @@ -239,21 +239,26 @@ func (o *KubernetesClusterPropertiesForPost) HasS3Buckets() bool { func (o KubernetesClusterPropertiesForPost) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.ApiSubnetAllowList != nil { + toSerialize["apiSubnetAllowList"] = o.ApiSubnetAllowList } + if o.K8sVersion != nil { toSerialize["k8sVersion"] = o.K8sVersion } + if o.MaintenanceWindow != nil { toSerialize["maintenanceWindow"] = o.MaintenanceWindow } - if o.ApiSubnetAllowList != nil { - toSerialize["apiSubnetAllowList"] = o.ApiSubnetAllowList + + if o.Name != nil { + toSerialize["name"] = o.Name } + if o.S3Buckets != nil { toSerialize["s3Buckets"] = o.S3Buckets } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_put.go index c2b9d44cd..b2bc4980d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_put.go @@ -16,13 +16,13 @@ import ( // KubernetesClusterPropertiesForPut struct for KubernetesClusterPropertiesForPut type KubernetesClusterPropertiesForPut struct { - // A Kubernetes cluster name. Valid Kubernetes cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. - Name *string `json:"name"` + // Access to the K8s API server is restricted to these CIDRs. Intra-cluster traffic is not affected by this restriction. If no AllowList is specified, access is not limited. If an IP is specified without a subnet mask, the default value is 32 for IPv4 and 128 for IPv6. + ApiSubnetAllowList *[]string `json:"apiSubnetAllowList,omitempty"` // The Kubernetes version that the cluster is running. This limits which Kubernetes versions can run in a cluster's node pools. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. 
K8sVersion *string `json:"k8sVersion,omitempty"` MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` - // Access to the K8s API server is restricted to these CIDRs. Intra-cluster traffic is not affected by this restriction. If no AllowList is specified, access is not limited. If an IP is specified without a subnet mask, the default value is 32 for IPv4 and 128 for IPv6. - ApiSubnetAllowList *[]string `json:"apiSubnetAllowList,omitempty"` + // A Kubernetes cluster name. Valid Kubernetes cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + Name *string `json:"name"` // List of S3 buckets configured for K8s usage. At the moment, it contains only one S3 bucket that is used to store K8s API audit logs. S3Buckets *[]S3Bucket `json:"s3Buckets,omitempty"` } @@ -47,38 +47,38 @@ func NewKubernetesClusterPropertiesForPutWithDefaults() *KubernetesClusterProper return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusterPropertiesForPut) GetName() *string { +// GetApiSubnetAllowList returns the ApiSubnetAllowList field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterPropertiesForPut) GetApiSubnetAllowList() *[]string { if o == nil { return nil } - return o.Name + return o.ApiSubnetAllowList } -// GetNameOk returns a tuple with the Name field value +// GetApiSubnetAllowListOk returns a tuple with the ApiSubnetAllowList field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterPropertiesForPut) GetNameOk() (*string, bool) { +func (o *KubernetesClusterPropertiesForPut) GetApiSubnetAllowListOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.ApiSubnetAllowList, true } -// SetName sets field value -func (o *KubernetesClusterPropertiesForPut) SetName(v string) { +// SetApiSubnetAllowList sets field value +func (o *KubernetesClusterPropertiesForPut) SetApiSubnetAllowList(v []string) { - o.Name = &v + o.ApiSubnetAllowList = &v } -// HasName returns a boolean if a field has been set. -func (o *KubernetesClusterPropertiesForPut) HasName() bool { - if o != nil && o.Name != nil { +// HasApiSubnetAllowList returns a boolean if a field has been set. 
+func (o *KubernetesClusterPropertiesForPut) HasApiSubnetAllowList() bool { + if o != nil && o.ApiSubnetAllowList != nil { return true } @@ -86,7 +86,7 @@ func (o *KubernetesClusterPropertiesForPut) HasName() bool { } // GetK8sVersion returns the K8sVersion field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterPropertiesForPut) GetK8sVersion() *string { if o == nil { return nil @@ -124,7 +124,7 @@ func (o *KubernetesClusterPropertiesForPut) HasK8sVersion() bool { } // GetMaintenanceWindow returns the MaintenanceWindow field value -// If the value is explicit nil, the zero value for KubernetesMaintenanceWindow will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterPropertiesForPut) GetMaintenanceWindow() *KubernetesMaintenanceWindow { if o == nil { return nil @@ -161,38 +161,38 @@ func (o *KubernetesClusterPropertiesForPut) HasMaintenanceWindow() bool { return false } -// GetApiSubnetAllowList returns the ApiSubnetAllowList field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *KubernetesClusterPropertiesForPut) GetApiSubnetAllowList() *[]string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusterPropertiesForPut) GetName() *string { if o == nil { return nil } - return o.ApiSubnetAllowList + return o.Name } -// GetApiSubnetAllowListOk returns a tuple with the ApiSubnetAllowList field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusterPropertiesForPut) GetApiSubnetAllowListOk() (*[]string, bool) { +func (o *KubernetesClusterPropertiesForPut) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.ApiSubnetAllowList, true + return o.Name, true } -// SetApiSubnetAllowList sets field value -func (o *KubernetesClusterPropertiesForPut) SetApiSubnetAllowList(v []string) { +// SetName sets field value +func (o *KubernetesClusterPropertiesForPut) SetName(v string) { - o.ApiSubnetAllowList = &v + o.Name = &v } -// HasApiSubnetAllowList returns a boolean if a field has been set. -func (o *KubernetesClusterPropertiesForPut) HasApiSubnetAllowList() bool { - if o != nil && o.ApiSubnetAllowList != nil { +// HasName returns a boolean if a field has been set. 
+func (o *KubernetesClusterPropertiesForPut) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -200,7 +200,7 @@ func (o *KubernetesClusterPropertiesForPut) HasApiSubnetAllowList() bool { } // GetS3Buckets returns the S3Buckets field value -// If the value is explicit nil, the zero value for []S3Bucket will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesClusterPropertiesForPut) GetS3Buckets() *[]S3Bucket { if o == nil { return nil @@ -239,21 +239,26 @@ func (o *KubernetesClusterPropertiesForPut) HasS3Buckets() bool { func (o KubernetesClusterPropertiesForPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.ApiSubnetAllowList != nil { + toSerialize["apiSubnetAllowList"] = o.ApiSubnetAllowList } + if o.K8sVersion != nil { toSerialize["k8sVersion"] = o.K8sVersion } + if o.MaintenanceWindow != nil { toSerialize["maintenanceWindow"] = o.MaintenanceWindow } - if o.ApiSubnetAllowList != nil { - toSerialize["apiSubnetAllowList"] = o.ApiSubnetAllowList + + if o.Name != nil { + toSerialize["name"] = o.Name } + if o.S3Buckets != nil { toSerialize["s3Buckets"] = o.S3Buckets } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_clusters.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_clusters.go index f1416d0eb..84f19c35c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_clusters.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_clusters.go @@ -16,14 +16,14 @@ import ( // KubernetesClusters struct for KubernetesClusters type KubernetesClusters struct { - // The unique representation of the K8s cluster as a resource collection. - Id *string `json:"id,omitempty"` - // The resource type within a collection. - Type *string `json:"type,omitempty"` // The URL to the collection representation (absolute path). Href *string `json:"href,omitempty"` + // The unique representation of the K8s cluster as a resource collection. + Id *string `json:"id,omitempty"` // Array of K8s clusters in the collection. Items *[]KubernetesCluster `json:"items,omitempty"` + // The resource type within a collection. + Type *string `json:"type,omitempty"` } // NewKubernetesClusters instantiates a new KubernetesClusters object @@ -44,152 +44,152 @@ func NewKubernetesClustersWithDefaults() *KubernetesClusters { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusters) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusters) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusters) GetIdOk() (*string, bool) { +func (o *KubernetesClusters) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *KubernetesClusters) SetId(v string) { +// SetHref sets field value +func (o *KubernetesClusters) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. 
-func (o *KubernetesClusters) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *KubernetesClusters) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusters) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusters) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusters) GetTypeOk() (*string, bool) { +func (o *KubernetesClusters) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *KubernetesClusters) SetType(v string) { +// SetId sets field value +func (o *KubernetesClusters) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesClusters) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *KubernetesClusters) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesClusters) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusters) GetItems() *[]KubernetesCluster { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusters) GetHrefOk() (*string, bool) { +func (o *KubernetesClusters) GetItemsOk() (*[]KubernetesCluster, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *KubernetesClusters) SetHref(v string) { +// SetItems sets field value +func (o *KubernetesClusters) SetItems(v []KubernetesCluster) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesClusters) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *KubernetesClusters) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []KubernetesCluster will be returned -func (o *KubernetesClusters) GetItems() *[]KubernetesCluster { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesClusters) GetType() *string { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesClusters) GetItemsOk() (*[]KubernetesCluster, bool) { +func (o *KubernetesClusters) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *KubernetesClusters) SetItems(v []KubernetesCluster) { +// SetType sets field value +func (o *KubernetesClusters) SetType(v string) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *KubernetesClusters) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *KubernetesClusters) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *KubernetesClusters) HasItems() bool { func (o KubernetesClusters) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_maintenance_window.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_maintenance_window.go index d5decee54..86d576253 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_maintenance_window.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_maintenance_window.go @@ -44,7 +44,7 @@ func NewKubernetesMaintenanceWindowWithDefaults() *KubernetesMaintenanceWindow { } // GetDayOfTheWeek returns the DayOfTheWeek field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesMaintenanceWindow) GetDayOfTheWeek() *string { if o == nil { return nil @@ -82,7 +82,7 @@ func (o *KubernetesMaintenanceWindow) HasDayOfTheWeek() bool { } // GetTime returns the Time field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesMaintenanceWindow) GetTime() *string { if o == nil { return nil @@ -124,9 +124,11 @@ func (o KubernetesMaintenanceWindow) MarshalJSON() ([]byte, error) { if o.DayOfTheWeek != nil { toSerialize["dayOfTheWeek"] = o.DayOfTheWeek } + if o.Time != nil { toSerialize["time"] = o.Time } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node.go index 936864aec..512c52312 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node.go @@ -16,14 +16,14 @@ import ( // KubernetesNode struct for KubernetesNode type KubernetesNode struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The object type. - Type *string `json:"type,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *KubernetesNodeMetadata `json:"metadata,omitempty"` Properties *KubernetesNodeProperties `json:"properties"` + // The object type. 
+ Type *string `json:"type,omitempty"` } // NewKubernetesNode instantiates a new KubernetesNode object @@ -46,190 +46,190 @@ func NewKubernetesNodeWithDefaults() *KubernetesNode { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNode) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNode) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNode) GetIdOk() (*string, bool) { +func (o *KubernetesNode) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *KubernetesNode) SetId(v string) { +// SetHref sets field value +func (o *KubernetesNode) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesNode) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *KubernetesNode) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNode) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNode) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNode) GetTypeOk() (*string, bool) { +func (o *KubernetesNode) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *KubernetesNode) SetType(v string) { +// SetId sets field value +func (o *KubernetesNode) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesNode) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *KubernetesNode) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNode) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNode) GetMetadata() *KubernetesNodeMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNode) GetHrefOk() (*string, bool) { +func (o *KubernetesNode) GetMetadataOk() (*KubernetesNodeMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *KubernetesNode) SetHref(v string) { +// SetMetadata sets field value +func (o *KubernetesNode) SetMetadata(v KubernetesNodeMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesNode) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *KubernetesNode) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for KubernetesNodeMetadata will be returned -func (o *KubernetesNode) GetMetadata() *KubernetesNodeMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNode) GetProperties() *KubernetesNodeProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNode) GetMetadataOk() (*KubernetesNodeMetadata, bool) { +func (o *KubernetesNode) GetPropertiesOk() (*KubernetesNodeProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *KubernetesNode) SetMetadata(v KubernetesNodeMetadata) { +// SetProperties sets field value +func (o *KubernetesNode) SetProperties(v KubernetesNodeProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *KubernetesNode) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *KubernetesNode) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for KubernetesNodeProperties will be returned -func (o *KubernetesNode) GetProperties() *KubernetesNodeProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNode) GetType() *string { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNode) GetPropertiesOk() (*KubernetesNodeProperties, bool) { +func (o *KubernetesNode) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *KubernetesNode) SetProperties(v KubernetesNodeProperties) { +// SetType sets field value +func (o *KubernetesNode) SetType(v string) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. 
-func (o *KubernetesNode) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *KubernetesNode) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *KubernetesNode) HasProperties() bool { func (o KubernetesNode) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_metadata.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_metadata.go index 8110ddd61..3915ed803 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_metadata.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_metadata.go @@ -17,16 +17,16 @@ import ( // KubernetesNodeMetadata struct for KubernetesNodeMetadata type KubernetesNodeMetadata struct { - // The resource entity tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity tags are also added as 'ETag' response headers to requests that do not use the 'depth' parameter. - Etag *string `json:"etag,omitempty"` // The date the resource was created. CreatedDate *IonosTime + // The resource entity tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity tags are also added as 'ETag' response headers to requests that do not use the 'depth' parameter. + Etag *string `json:"etag,omitempty"` // The date the resource was last modified. LastModifiedDate *IonosTime - // The resource state. - State *string `json:"state,omitempty"` // The date when the software on the node was last updated. LastSoftwareUpdatedDate *IonosTime + // The resource state. + State *string `json:"state,omitempty"` } // NewKubernetesNodeMetadata instantiates a new KubernetesNodeMetadata object @@ -47,83 +47,83 @@ func NewKubernetesNodeMetadataWithDefaults() *KubernetesNodeMetadata { return &this } -// GetEtag returns the Etag field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodeMetadata) GetEtag() *string { +// GetCreatedDate returns the CreatedDate field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodeMetadata) GetCreatedDate() *time.Time { if o == nil { return nil } - return o.Etag + if o.CreatedDate == nil { + return nil + } + return &o.CreatedDate.Time } -// GetEtagOk returns a tuple with the Etag field value +// GetCreatedDateOk returns a tuple with the CreatedDate field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodeMetadata) GetEtagOk() (*string, bool) { +func (o *KubernetesNodeMetadata) GetCreatedDateOk() (*time.Time, bool) { if o == nil { return nil, false } - return o.Etag, true + if o.CreatedDate == nil { + return nil, false + } + return &o.CreatedDate.Time, true + } -// SetEtag sets field value -func (o *KubernetesNodeMetadata) SetEtag(v string) { +// SetCreatedDate sets field value +func (o *KubernetesNodeMetadata) SetCreatedDate(v time.Time) { - o.Etag = &v + o.CreatedDate = &IonosTime{v} } -// HasEtag returns a boolean if a field has been set. -func (o *KubernetesNodeMetadata) HasEtag() bool { - if o != nil && o.Etag != nil { +// HasCreatedDate returns a boolean if a field has been set. +func (o *KubernetesNodeMetadata) HasCreatedDate() bool { + if o != nil && o.CreatedDate != nil { return true } return false } -// GetCreatedDate returns the CreatedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned -func (o *KubernetesNodeMetadata) GetCreatedDate() *time.Time { +// GetEtag returns the Etag field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodeMetadata) GetEtag() *string { if o == nil { return nil } - if o.CreatedDate == nil { - return nil - } - return &o.CreatedDate.Time + return o.Etag } -// GetCreatedDateOk returns a tuple with the CreatedDate field value +// GetEtagOk returns a tuple with the Etag field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodeMetadata) GetCreatedDateOk() (*time.Time, bool) { +func (o *KubernetesNodeMetadata) GetEtagOk() (*string, bool) { if o == nil { return nil, false } - if o.CreatedDate == nil { - return nil, false - } - return &o.CreatedDate.Time, true - + return o.Etag, true } -// SetCreatedDate sets field value -func (o *KubernetesNodeMetadata) SetCreatedDate(v time.Time) { +// SetEtag sets field value +func (o *KubernetesNodeMetadata) SetEtag(v string) { - o.CreatedDate = &IonosTime{v} + o.Etag = &v } -// HasCreatedDate returns a boolean if a field has been set. -func (o *KubernetesNodeMetadata) HasCreatedDate() bool { - if o != nil && o.CreatedDate != nil { +// HasEtag returns a boolean if a field has been set. 
+func (o *KubernetesNodeMetadata) HasEtag() bool { + if o != nil && o.Etag != nil { return true } @@ -131,7 +131,7 @@ func (o *KubernetesNodeMetadata) HasCreatedDate() bool { } // GetLastModifiedDate returns the LastModifiedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesNodeMetadata) GetLastModifiedDate() *time.Time { if o == nil { return nil @@ -175,83 +175,83 @@ func (o *KubernetesNodeMetadata) HasLastModifiedDate() bool { return false } -// GetState returns the State field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodeMetadata) GetState() *string { +// GetLastSoftwareUpdatedDate returns the LastSoftwareUpdatedDate field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodeMetadata) GetLastSoftwareUpdatedDate() *time.Time { if o == nil { return nil } - return o.State + if o.LastSoftwareUpdatedDate == nil { + return nil + } + return &o.LastSoftwareUpdatedDate.Time } -// GetStateOk returns a tuple with the State field value +// GetLastSoftwareUpdatedDateOk returns a tuple with the LastSoftwareUpdatedDate field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodeMetadata) GetStateOk() (*string, bool) { +func (o *KubernetesNodeMetadata) GetLastSoftwareUpdatedDateOk() (*time.Time, bool) { if o == nil { return nil, false } - return o.State, true + if o.LastSoftwareUpdatedDate == nil { + return nil, false + } + return &o.LastSoftwareUpdatedDate.Time, true + } -// SetState sets field value -func (o *KubernetesNodeMetadata) SetState(v string) { +// SetLastSoftwareUpdatedDate sets field value +func (o *KubernetesNodeMetadata) SetLastSoftwareUpdatedDate(v time.Time) { - o.State = &v + o.LastSoftwareUpdatedDate = &IonosTime{v} } -// HasState returns a boolean if a field has been set. -func (o *KubernetesNodeMetadata) HasState() bool { - if o != nil && o.State != nil { +// HasLastSoftwareUpdatedDate returns a boolean if a field has been set. +func (o *KubernetesNodeMetadata) HasLastSoftwareUpdatedDate() bool { + if o != nil && o.LastSoftwareUpdatedDate != nil { return true } return false } -// GetLastSoftwareUpdatedDate returns the LastSoftwareUpdatedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned -func (o *KubernetesNodeMetadata) GetLastSoftwareUpdatedDate() *time.Time { +// GetState returns the State field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodeMetadata) GetState() *string { if o == nil { return nil } - if o.LastSoftwareUpdatedDate == nil { - return nil - } - return &o.LastSoftwareUpdatedDate.Time + return o.State } -// GetLastSoftwareUpdatedDateOk returns a tuple with the LastSoftwareUpdatedDate field value +// GetStateOk returns a tuple with the State field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodeMetadata) GetLastSoftwareUpdatedDateOk() (*time.Time, bool) { +func (o *KubernetesNodeMetadata) GetStateOk() (*string, bool) { if o == nil { return nil, false } - if o.LastSoftwareUpdatedDate == nil { - return nil, false - } - return &o.LastSoftwareUpdatedDate.Time, true - + return o.State, true } -// SetLastSoftwareUpdatedDate sets field value -func (o *KubernetesNodeMetadata) SetLastSoftwareUpdatedDate(v time.Time) { +// SetState sets field value +func (o *KubernetesNodeMetadata) SetState(v string) { - o.LastSoftwareUpdatedDate = &IonosTime{v} + o.State = &v } -// HasLastSoftwareUpdatedDate returns a boolean if a field has been set. -func (o *KubernetesNodeMetadata) HasLastSoftwareUpdatedDate() bool { - if o != nil && o.LastSoftwareUpdatedDate != nil { +// HasState returns a boolean if a field has been set. +func (o *KubernetesNodeMetadata) HasState() bool { + if o != nil && o.State != nil { return true } @@ -260,21 +260,26 @@ func (o *KubernetesNodeMetadata) HasLastSoftwareUpdatedDate() bool { func (o KubernetesNodeMetadata) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Etag != nil { - toSerialize["etag"] = o.Etag - } if o.CreatedDate != nil { toSerialize["createdDate"] = o.CreatedDate } + + if o.Etag != nil { + toSerialize["etag"] = o.Etag + } + if o.LastModifiedDate != nil { toSerialize["lastModifiedDate"] = o.LastModifiedDate } - if o.State != nil { - toSerialize["state"] = o.State - } + if o.LastSoftwareUpdatedDate != nil { toSerialize["lastSoftwareUpdatedDate"] = o.LastSoftwareUpdatedDate } + + if o.State != nil { + toSerialize["state"] = o.State + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool.go index a634f0a8b..9649a8ad8 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool.go @@ -16,14 +16,14 @@ import ( // KubernetesNodePool struct for KubernetesNodePool type KubernetesNodePool struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The object type. - Type *string `json:"type,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesNodePoolProperties `json:"properties"` + // The object type. + Type *string `json:"type,omitempty"` } // NewKubernetesNodePool instantiates a new KubernetesNodePool object @@ -46,190 +46,190 @@ func NewKubernetesNodePoolWithDefaults() *KubernetesNodePool { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePool) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePool) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePool) GetIdOk() (*string, bool) { +func (o *KubernetesNodePool) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *KubernetesNodePool) SetId(v string) { +// SetHref sets field value +func (o *KubernetesNodePool) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesNodePool) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *KubernetesNodePool) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePool) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePool) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePool) GetTypeOk() (*string, bool) { +func (o *KubernetesNodePool) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *KubernetesNodePool) SetType(v string) { +// SetId sets field value +func (o *KubernetesNodePool) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesNodePool) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *KubernetesNodePool) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePool) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePool) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePool) GetHrefOk() (*string, bool) { +func (o *KubernetesNodePool) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *KubernetesNodePool) SetHref(v string) { +// SetMetadata sets field value +func (o *KubernetesNodePool) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesNodePool) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. 
+func (o *KubernetesNodePool) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *KubernetesNodePool) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePool) GetProperties() *KubernetesNodePoolProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePool) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *KubernetesNodePool) GetPropertiesOk() (*KubernetesNodePoolProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *KubernetesNodePool) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *KubernetesNodePool) SetProperties(v KubernetesNodePoolProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *KubernetesNodePool) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *KubernetesNodePool) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for KubernetesNodePoolProperties will be returned -func (o *KubernetesNodePool) GetProperties() *KubernetesNodePoolProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePool) GetType() *string { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePool) GetPropertiesOk() (*KubernetesNodePoolProperties, bool) { +func (o *KubernetesNodePool) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *KubernetesNodePool) SetProperties(v KubernetesNodePoolProperties) { +// SetType sets field value +func (o *KubernetesNodePool) SetType(v string) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *KubernetesNodePool) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *KubernetesNodePool) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *KubernetesNodePool) HasProperties() bool { func (o KubernetesNodePool) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_post.go index 9a89d3d18..afeef99c7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_post.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_post.go @@ -16,14 +16,14 @@ import ( // KubernetesNodePoolForPost struct for KubernetesNodePoolForPost type KubernetesNodePoolForPost struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The object type. - Type *string `json:"type,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesNodePoolPropertiesForPost `json:"properties"` + // The object type. + Type *string `json:"type,omitempty"` } // NewKubernetesNodePoolForPost instantiates a new KubernetesNodePoolForPost object @@ -46,190 +46,190 @@ func NewKubernetesNodePoolForPostWithDefaults() *KubernetesNodePoolForPost { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolForPost) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPost) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPost) GetIdOk() (*string, bool) { +func (o *KubernetesNodePoolForPost) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *KubernetesNodePoolForPost) SetId(v string) { +// SetHref sets field value +func (o *KubernetesNodePoolForPost) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPost) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolForPost) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolForPost) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPost) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPost) GetTypeOk() (*string, bool) { +func (o *KubernetesNodePoolForPost) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *KubernetesNodePoolForPost) SetType(v string) { +// SetId sets field value +func (o *KubernetesNodePoolForPost) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPost) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *KubernetesNodePoolForPost) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolForPost) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPost) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPost) GetHrefOk() (*string, bool) { +func (o *KubernetesNodePoolForPost) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *KubernetesNodePoolForPost) SetHref(v string) { +// SetMetadata sets field value +func (o *KubernetesNodePoolForPost) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPost) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolForPost) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *KubernetesNodePoolForPost) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPost) GetProperties() *KubernetesNodePoolPropertiesForPost { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPost) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *KubernetesNodePoolForPost) GetPropertiesOk() (*KubernetesNodePoolPropertiesForPost, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *KubernetesNodePoolForPost) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *KubernetesNodePoolForPost) SetProperties(v KubernetesNodePoolPropertiesForPost) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPost) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *KubernetesNodePoolForPost) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for KubernetesNodePoolPropertiesForPost will be returned -func (o *KubernetesNodePoolForPost) GetProperties() *KubernetesNodePoolPropertiesForPost { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPost) GetType() *string { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPost) GetPropertiesOk() (*KubernetesNodePoolPropertiesForPost, bool) { +func (o *KubernetesNodePoolForPost) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *KubernetesNodePoolForPost) SetProperties(v KubernetesNodePoolPropertiesForPost) { +// SetType sets field value +func (o *KubernetesNodePoolForPost) SetType(v string) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPost) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolForPost) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *KubernetesNodePoolForPost) HasProperties() bool { func (o KubernetesNodePoolForPost) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_put.go index 10fa4f290..94df03dd9 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_put.go @@ -16,14 +16,14 @@ import ( // KubernetesNodePoolForPut struct for KubernetesNodePoolForPut type KubernetesNodePoolForPut struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The object type. - Type *string `json:"type,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesNodePoolPropertiesForPut `json:"properties"` + // The object type. + Type *string `json:"type,omitempty"` } // NewKubernetesNodePoolForPut instantiates a new KubernetesNodePoolForPut object @@ -46,190 +46,190 @@ func NewKubernetesNodePoolForPutWithDefaults() *KubernetesNodePoolForPut { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolForPut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPut) GetIdOk() (*string, bool) { +func (o *KubernetesNodePoolForPut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *KubernetesNodePoolForPut) SetId(v string) { +// SetHref sets field value +func (o *KubernetesNodePoolForPut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolForPut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolForPut) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPut) GetTypeOk() (*string, bool) { +func (o *KubernetesNodePoolForPut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *KubernetesNodePoolForPut) SetType(v string) { +// SetId sets field value +func (o *KubernetesNodePoolForPut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *KubernetesNodePoolForPut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolForPut) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPut) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPut) GetHrefOk() (*string, bool) { +func (o *KubernetesNodePoolForPut) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *KubernetesNodePoolForPut) SetHref(v string) { +// SetMetadata sets field value +func (o *KubernetesNodePoolForPut) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPut) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *KubernetesNodePoolForPut) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *KubernetesNodePoolForPut) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPut) GetProperties() *KubernetesNodePoolPropertiesForPut { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPut) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *KubernetesNodePoolForPut) GetPropertiesOk() (*KubernetesNodePoolPropertiesForPut, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *KubernetesNodePoolForPut) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *KubernetesNodePoolForPut) SetProperties(v KubernetesNodePoolPropertiesForPut) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPut) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *KubernetesNodePoolForPut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for KubernetesNodePoolPropertiesForPut will be returned -func (o *KubernetesNodePoolForPut) GetProperties() *KubernetesNodePoolPropertiesForPut { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolForPut) GetType() *string { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolForPut) GetPropertiesOk() (*KubernetesNodePoolPropertiesForPut, bool) { +func (o *KubernetesNodePoolForPut) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *KubernetesNodePoolForPut) SetProperties(v KubernetesNodePoolPropertiesForPut) { +// SetType sets field value +func (o *KubernetesNodePoolForPut) SetType(v string) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *KubernetesNodePoolForPut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolForPut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *KubernetesNodePoolForPut) HasProperties() bool { func (o KubernetesNodePoolForPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan.go index 012c0f4b7..926f1498a 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan.go @@ -18,10 +18,10 @@ import ( type KubernetesNodePoolLan struct { // The datacenter ID, requires system privileges, for internal usage only DatacenterId *string `json:"datacenterId,omitempty"` - // The LAN ID of an existing LAN at the related data center - Id *int32 `json:"id"` // Specifies whether the Kubernetes node pool LAN reserves an IP with DHCP. Dhcp *bool `json:"dhcp,omitempty"` + // The LAN ID of an existing LAN at the related data center + Id *int32 `json:"id"` // The array of additional LANs attached to worker nodes. Routes *[]KubernetesNodePoolLanRoutes `json:"routes,omitempty"` } @@ -47,7 +47,7 @@ func NewKubernetesNodePoolLanWithDefaults() *KubernetesNodePoolLan { } // GetDatacenterId returns the DatacenterId field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesNodePoolLan) GetDatacenterId() *string { if o == nil { return nil @@ -84,76 +84,76 @@ func (o *KubernetesNodePoolLan) HasDatacenterId() bool { return false } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesNodePoolLan) GetId() *int32 { +// GetDhcp returns the Dhcp field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolLan) GetDhcp() *bool { if o == nil { return nil } - return o.Id + return o.Dhcp } -// GetIdOk returns a tuple with the Id field value +// GetDhcpOk returns a tuple with the Dhcp field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolLan) GetIdOk() (*int32, bool) { +func (o *KubernetesNodePoolLan) GetDhcpOk() (*bool, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Dhcp, true } -// SetId sets field value -func (o *KubernetesNodePoolLan) SetId(v int32) { +// SetDhcp sets field value +func (o *KubernetesNodePoolLan) SetDhcp(v bool) { - o.Id = &v + o.Dhcp = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesNodePoolLan) HasId() bool { - if o != nil && o.Id != nil { +// HasDhcp returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolLan) HasDhcp() bool { + if o != nil && o.Dhcp != nil { return true } return false } -// GetDhcp returns the Dhcp field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *KubernetesNodePoolLan) GetDhcp() *bool { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolLan) GetId() *int32 { if o == nil { return nil } - return o.Dhcp + return o.Id } -// GetDhcpOk returns a tuple with the Dhcp field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolLan) GetDhcpOk() (*bool, bool) { +func (o *KubernetesNodePoolLan) GetIdOk() (*int32, bool) { if o == nil { return nil, false } - return o.Dhcp, true + return o.Id, true } -// SetDhcp sets field value -func (o *KubernetesNodePoolLan) SetDhcp(v bool) { +// SetId sets field value +func (o *KubernetesNodePoolLan) SetId(v int32) { - o.Dhcp = &v + o.Id = &v } -// HasDhcp returns a boolean if a field has been set. -func (o *KubernetesNodePoolLan) HasDhcp() bool { - if o != nil && o.Dhcp != nil { +// HasId returns a boolean if a field has been set. +func (o *KubernetesNodePoolLan) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -161,7 +161,7 @@ func (o *KubernetesNodePoolLan) HasDhcp() bool { } // GetRoutes returns the Routes field value -// If the value is explicit nil, the zero value for []KubernetesNodePoolLanRoutes will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesNodePoolLan) GetRoutes() *[]KubernetesNodePoolLanRoutes { if o == nil { return nil @@ -203,15 +203,19 @@ func (o KubernetesNodePoolLan) MarshalJSON() ([]byte, error) { if o.DatacenterId != nil { toSerialize["datacenterId"] = o.DatacenterId } - if o.Id != nil { - toSerialize["id"] = o.Id - } + if o.Dhcp != nil { toSerialize["dhcp"] = o.Dhcp } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Routes != nil { toSerialize["routes"] = o.Routes } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan_routes.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan_routes.go index 0d57c4592..7ba035e44 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan_routes.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan_routes.go @@ -16,10 +16,10 @@ import ( // KubernetesNodePoolLanRoutes struct for KubernetesNodePoolLanRoutes type KubernetesNodePoolLanRoutes struct { - // IPv4 or IPv6 CIDR to be routed via the interface. - Network *string `json:"network,omitempty"` // IPv4 or IPv6 Gateway IP for the route. GatewayIp *string `json:"gatewayIp,omitempty"` + // IPv4 or IPv6 CIDR to be routed via the interface. 
+ Network *string `json:"network,omitempty"` } // NewKubernetesNodePoolLanRoutes instantiates a new KubernetesNodePoolLanRoutes object @@ -40,76 +40,76 @@ func NewKubernetesNodePoolLanRoutesWithDefaults() *KubernetesNodePoolLanRoutes { return &this } -// GetNetwork returns the Network field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolLanRoutes) GetNetwork() *string { +// GetGatewayIp returns the GatewayIp field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolLanRoutes) GetGatewayIp() *string { if o == nil { return nil } - return o.Network + return o.GatewayIp } -// GetNetworkOk returns a tuple with the Network field value +// GetGatewayIpOk returns a tuple with the GatewayIp field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolLanRoutes) GetNetworkOk() (*string, bool) { +func (o *KubernetesNodePoolLanRoutes) GetGatewayIpOk() (*string, bool) { if o == nil { return nil, false } - return o.Network, true + return o.GatewayIp, true } -// SetNetwork sets field value -func (o *KubernetesNodePoolLanRoutes) SetNetwork(v string) { +// SetGatewayIp sets field value +func (o *KubernetesNodePoolLanRoutes) SetGatewayIp(v string) { - o.Network = &v + o.GatewayIp = &v } -// HasNetwork returns a boolean if a field has been set. -func (o *KubernetesNodePoolLanRoutes) HasNetwork() bool { - if o != nil && o.Network != nil { +// HasGatewayIp returns a boolean if a field has been set. +func (o *KubernetesNodePoolLanRoutes) HasGatewayIp() bool { + if o != nil && o.GatewayIp != nil { return true } return false } -// GetGatewayIp returns the GatewayIp field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolLanRoutes) GetGatewayIp() *string { +// GetNetwork returns the Network field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolLanRoutes) GetNetwork() *string { if o == nil { return nil } - return o.GatewayIp + return o.Network } -// GetGatewayIpOk returns a tuple with the GatewayIp field value +// GetNetworkOk returns a tuple with the Network field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolLanRoutes) GetGatewayIpOk() (*string, bool) { +func (o *KubernetesNodePoolLanRoutes) GetNetworkOk() (*string, bool) { if o == nil { return nil, false } - return o.GatewayIp, true + return o.Network, true } -// SetGatewayIp sets field value -func (o *KubernetesNodePoolLanRoutes) SetGatewayIp(v string) { +// SetNetwork sets field value +func (o *KubernetesNodePoolLanRoutes) SetNetwork(v string) { - o.GatewayIp = &v + o.Network = &v } -// HasGatewayIp returns a boolean if a field has been set. -func (o *KubernetesNodePoolLanRoutes) HasGatewayIp() bool { - if o != nil && o.GatewayIp != nil { +// HasNetwork returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolLanRoutes) HasNetwork() bool { + if o != nil && o.Network != nil { return true } @@ -118,12 +118,14 @@ func (o *KubernetesNodePoolLanRoutes) HasGatewayIp() bool { func (o KubernetesNodePoolLanRoutes) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Network != nil { - toSerialize["network"] = o.Network - } if o.GatewayIp != nil { toSerialize["gatewayIp"] = o.GatewayIp } + + if o.Network != nil { + toSerialize["network"] = o.Network + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties.go index 5e18701de..376120c0c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties.go @@ -16,56 +16,56 @@ import ( // KubernetesNodePoolProperties struct for KubernetesNodePoolProperties type KubernetesNodePoolProperties struct { - // A Kubernetes node pool name. Valid Kubernetes node pool name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. - Name *string `json:"name"` + // The annotations attached to the node pool. + Annotations *map[string]string `json:"annotations,omitempty"` + AutoScaling *KubernetesAutoScaling `json:"autoScaling,omitempty"` + // The availability zone in which the target VM should be provisioned. + AvailabilityZone *string `json:"availabilityZone"` + // The list of available versions for upgrading the node pool. + AvailableUpgradeVersions *[]string `json:"availableUpgradeVersions,omitempty"` + // The total number of cores for the nodes. + CoresCount *int32 `json:"coresCount"` + // The CPU type for the nodes. + CpuFamily *string `json:"cpuFamily"` // The unique identifier of the VDC where the worker nodes of the node pool are provisioned.Note that the data center is located in the exact place where the parent cluster of the node pool is located. DatacenterId *string `json:"datacenterId"` + // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. + K8sVersion *string `json:"k8sVersion,omitempty"` + // The labels attached to the node pool. + Labels *map[string]string `json:"labels,omitempty"` + // The array of existing private LANs to attach to worker nodes. + Lans *[]KubernetesNodePoolLan `json:"lans,omitempty"` + MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` + // A Kubernetes node pool name. Valid Kubernetes node pool name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + Name *string `json:"name"` // The number of worker nodes of the node pool. NodeCount *int32 `json:"nodeCount"` - // The CPU type for the nodes. - CpuFamily *string `json:"cpuFamily"` - // The total number of cores for the nodes. - CoresCount *int32 `json:"coresCount"` + // Optional array of reserved public IP addresses to be used by the nodes. The IPs must be from the exact location of the node pool's data center. 
If autoscaling is used, the array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for a fixed number of nodes or maxNodeCount+1). The extra IP is used when the nodes are rebuilt. + PublicIps *[]string `json:"publicIps,omitempty"` // The RAM size for the nodes. Must be specified in multiples of 1024 MB, with a minimum size of 2048 MB. RamSize *int32 `json:"ramSize"` - // The availability zone in which the target VM should be provisioned. - AvailabilityZone *string `json:"availabilityZone"` - // The storage type for the nodes. - StorageType *string `json:"storageType"` // The allocated volume size in GB. The allocated volume size in GB. To achieve good performance, we recommend a size greater than 100GB for SSD. StorageSize *int32 `json:"storageSize"` - // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. - K8sVersion *string `json:"k8sVersion,omitempty"` - MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` - AutoScaling *KubernetesAutoScaling `json:"autoScaling,omitempty"` - // The array of existing private LANs to attach to worker nodes. - Lans *[]KubernetesNodePoolLan `json:"lans,omitempty"` - // The labels attached to the node pool. - Labels *map[string]string `json:"labels,omitempty"` - // The annotations attached to the node pool. - Annotations *map[string]string `json:"annotations,omitempty"` - // Optional array of reserved public IP addresses to be used by the nodes. The IPs must be from the exact location of the node pool's data center. If autoscaling is used, the array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for a fixed number of nodes or maxNodeCount+1). The extra IP is used when the nodes are rebuilt. - PublicIps *[]string `json:"publicIps,omitempty"` - // The list of available versions for upgrading the node pool. - AvailableUpgradeVersions *[]string `json:"availableUpgradeVersions,omitempty"` + // The storage type for the nodes. 
+ StorageType *string `json:"storageType"` } // NewKubernetesNodePoolProperties instantiates a new KubernetesNodePoolProperties object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewKubernetesNodePoolProperties(name string, datacenterId string, nodeCount int32, cpuFamily string, coresCount int32, ramSize int32, availabilityZone string, storageType string, storageSize int32) *KubernetesNodePoolProperties { +func NewKubernetesNodePoolProperties(availabilityZone string, coresCount int32, cpuFamily string, datacenterId string, name string, nodeCount int32, ramSize int32, storageSize int32, storageType string) *KubernetesNodePoolProperties { this := KubernetesNodePoolProperties{} - this.Name = &name + this.AvailabilityZone = &availabilityZone + this.CoresCount = &coresCount + this.CpuFamily = &cpuFamily this.DatacenterId = &datacenterId + this.Name = &name this.NodeCount = &nodeCount - this.CpuFamily = &cpuFamily - this.CoresCount = &coresCount this.RamSize = &ramSize - this.AvailabilityZone = &availabilityZone - this.StorageType = &storageType this.StorageSize = &storageSize + this.StorageType = &storageType return &this } @@ -78,152 +78,152 @@ func NewKubernetesNodePoolPropertiesWithDefaults() *KubernetesNodePoolProperties return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolProperties) GetName() *string { +// GetAnnotations returns the Annotations field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetAnnotations() *map[string]string { if o == nil { return nil } - return o.Name + return o.Annotations } -// GetNameOk returns a tuple with the Name field value +// GetAnnotationsOk returns a tuple with the Annotations field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetNameOk() (*string, bool) { +func (o *KubernetesNodePoolProperties) GetAnnotationsOk() (*map[string]string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Annotations, true } -// SetName sets field value -func (o *KubernetesNodePoolProperties) SetName(v string) { +// SetAnnotations sets field value +func (o *KubernetesNodePoolProperties) SetAnnotations(v map[string]string) { - o.Name = &v + o.Annotations = &v } -// HasName returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasAnnotations returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolProperties) HasAnnotations() bool { + if o != nil && o.Annotations != nil { return true } return false } -// GetDatacenterId returns the DatacenterId field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolProperties) GetDatacenterId() *string { +// GetAutoScaling returns the AutoScaling field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetAutoScaling() *KubernetesAutoScaling { if o == nil { return nil } - return o.DatacenterId + return o.AutoScaling } -// GetDatacenterIdOk returns a tuple with the DatacenterId field value +// GetAutoScalingOk returns a tuple with the AutoScaling field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetDatacenterIdOk() (*string, bool) { +func (o *KubernetesNodePoolProperties) GetAutoScalingOk() (*KubernetesAutoScaling, bool) { if o == nil { return nil, false } - return o.DatacenterId, true + return o.AutoScaling, true } -// SetDatacenterId sets field value -func (o *KubernetesNodePoolProperties) SetDatacenterId(v string) { +// SetAutoScaling sets field value +func (o *KubernetesNodePoolProperties) SetAutoScaling(v KubernetesAutoScaling) { - o.DatacenterId = &v + o.AutoScaling = &v } -// HasDatacenterId returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasDatacenterId() bool { - if o != nil && o.DatacenterId != nil { +// HasAutoScaling returns a boolean if a field has been set. +func (o *KubernetesNodePoolProperties) HasAutoScaling() bool { + if o != nil && o.AutoScaling != nil { return true } return false } -// GetNodeCount returns the NodeCount field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesNodePoolProperties) GetNodeCount() *int32 { +// GetAvailabilityZone returns the AvailabilityZone field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetAvailabilityZone() *string { if o == nil { return nil } - return o.NodeCount + return o.AvailabilityZone } -// GetNodeCountOk returns a tuple with the NodeCount field value +// GetAvailabilityZoneOk returns a tuple with the AvailabilityZone field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetNodeCountOk() (*int32, bool) { +func (o *KubernetesNodePoolProperties) GetAvailabilityZoneOk() (*string, bool) { if o == nil { return nil, false } - return o.NodeCount, true + return o.AvailabilityZone, true } -// SetNodeCount sets field value -func (o *KubernetesNodePoolProperties) SetNodeCount(v int32) { +// SetAvailabilityZone sets field value +func (o *KubernetesNodePoolProperties) SetAvailabilityZone(v string) { - o.NodeCount = &v + o.AvailabilityZone = &v } -// HasNodeCount returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasNodeCount() bool { - if o != nil && o.NodeCount != nil { +// HasAvailabilityZone returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolProperties) HasAvailabilityZone() bool { + if o != nil && o.AvailabilityZone != nil { return true } return false } -// GetCpuFamily returns the CpuFamily field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolProperties) GetCpuFamily() *string { +// GetAvailableUpgradeVersions returns the AvailableUpgradeVersions field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetAvailableUpgradeVersions() *[]string { if o == nil { return nil } - return o.CpuFamily + return o.AvailableUpgradeVersions } -// GetCpuFamilyOk returns a tuple with the CpuFamily field value +// GetAvailableUpgradeVersionsOk returns a tuple with the AvailableUpgradeVersions field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetCpuFamilyOk() (*string, bool) { +func (o *KubernetesNodePoolProperties) GetAvailableUpgradeVersionsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.CpuFamily, true + return o.AvailableUpgradeVersions, true } -// SetCpuFamily sets field value -func (o *KubernetesNodePoolProperties) SetCpuFamily(v string) { +// SetAvailableUpgradeVersions sets field value +func (o *KubernetesNodePoolProperties) SetAvailableUpgradeVersions(v []string) { - o.CpuFamily = &v + o.AvailableUpgradeVersions = &v } -// HasCpuFamily returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasCpuFamily() bool { - if o != nil && o.CpuFamily != nil { +// HasAvailableUpgradeVersions returns a boolean if a field has been set. +func (o *KubernetesNodePoolProperties) HasAvailableUpgradeVersions() bool { + if o != nil && o.AvailableUpgradeVersions != nil { return true } @@ -231,7 +231,7 @@ func (o *KubernetesNodePoolProperties) HasCpuFamily() bool { } // GetCoresCount returns the CoresCount field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesNodePoolProperties) GetCoresCount() *int32 { if o == nil { return nil @@ -268,190 +268,190 @@ func (o *KubernetesNodePoolProperties) HasCoresCount() bool { return false } -// GetRamSize returns the RamSize field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesNodePoolProperties) GetRamSize() *int32 { +// GetCpuFamily returns the CpuFamily field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetCpuFamily() *string { if o == nil { return nil } - return o.RamSize + return o.CpuFamily } -// GetRamSizeOk returns a tuple with the RamSize field value +// GetCpuFamilyOk returns a tuple with the CpuFamily field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetRamSizeOk() (*int32, bool) { +func (o *KubernetesNodePoolProperties) GetCpuFamilyOk() (*string, bool) { if o == nil { return nil, false } - return o.RamSize, true + return o.CpuFamily, true } -// SetRamSize sets field value -func (o *KubernetesNodePoolProperties) SetRamSize(v int32) { +// SetCpuFamily sets field value +func (o *KubernetesNodePoolProperties) SetCpuFamily(v string) { - o.RamSize = &v + o.CpuFamily = &v } -// HasRamSize returns a boolean if a field has been set. 
-func (o *KubernetesNodePoolProperties) HasRamSize() bool { - if o != nil && o.RamSize != nil { +// HasCpuFamily returns a boolean if a field has been set. +func (o *KubernetesNodePoolProperties) HasCpuFamily() bool { + if o != nil && o.CpuFamily != nil { return true } return false } -// GetAvailabilityZone returns the AvailabilityZone field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolProperties) GetAvailabilityZone() *string { +// GetDatacenterId returns the DatacenterId field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetDatacenterId() *string { if o == nil { return nil } - return o.AvailabilityZone + return o.DatacenterId } -// GetAvailabilityZoneOk returns a tuple with the AvailabilityZone field value +// GetDatacenterIdOk returns a tuple with the DatacenterId field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetAvailabilityZoneOk() (*string, bool) { +func (o *KubernetesNodePoolProperties) GetDatacenterIdOk() (*string, bool) { if o == nil { return nil, false } - return o.AvailabilityZone, true + return o.DatacenterId, true } -// SetAvailabilityZone sets field value -func (o *KubernetesNodePoolProperties) SetAvailabilityZone(v string) { +// SetDatacenterId sets field value +func (o *KubernetesNodePoolProperties) SetDatacenterId(v string) { - o.AvailabilityZone = &v + o.DatacenterId = &v } -// HasAvailabilityZone returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasAvailabilityZone() bool { - if o != nil && o.AvailabilityZone != nil { +// HasDatacenterId returns a boolean if a field has been set. +func (o *KubernetesNodePoolProperties) HasDatacenterId() bool { + if o != nil && o.DatacenterId != nil { return true } return false } -// GetStorageType returns the StorageType field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolProperties) GetStorageType() *string { +// GetK8sVersion returns the K8sVersion field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetK8sVersion() *string { if o == nil { return nil } - return o.StorageType + return o.K8sVersion } -// GetStorageTypeOk returns a tuple with the StorageType field value +// GetK8sVersionOk returns a tuple with the K8sVersion field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetStorageTypeOk() (*string, bool) { +func (o *KubernetesNodePoolProperties) GetK8sVersionOk() (*string, bool) { if o == nil { return nil, false } - return o.StorageType, true + return o.K8sVersion, true } -// SetStorageType sets field value -func (o *KubernetesNodePoolProperties) SetStorageType(v string) { +// SetK8sVersion sets field value +func (o *KubernetesNodePoolProperties) SetK8sVersion(v string) { - o.StorageType = &v + o.K8sVersion = &v } -// HasStorageType returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasStorageType() bool { - if o != nil && o.StorageType != nil { +// HasK8sVersion returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolProperties) HasK8sVersion() bool { + if o != nil && o.K8sVersion != nil { return true } return false } -// GetStorageSize returns the StorageSize field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesNodePoolProperties) GetStorageSize() *int32 { +// GetLabels returns the Labels field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetLabels() *map[string]string { if o == nil { return nil } - return o.StorageSize + return o.Labels } -// GetStorageSizeOk returns a tuple with the StorageSize field value +// GetLabelsOk returns a tuple with the Labels field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetStorageSizeOk() (*int32, bool) { +func (o *KubernetesNodePoolProperties) GetLabelsOk() (*map[string]string, bool) { if o == nil { return nil, false } - return o.StorageSize, true + return o.Labels, true } -// SetStorageSize sets field value -func (o *KubernetesNodePoolProperties) SetStorageSize(v int32) { +// SetLabels sets field value +func (o *KubernetesNodePoolProperties) SetLabels(v map[string]string) { - o.StorageSize = &v + o.Labels = &v } -// HasStorageSize returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasStorageSize() bool { - if o != nil && o.StorageSize != nil { +// HasLabels returns a boolean if a field has been set. +func (o *KubernetesNodePoolProperties) HasLabels() bool { + if o != nil && o.Labels != nil { return true } return false } -// GetK8sVersion returns the K8sVersion field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolProperties) GetK8sVersion() *string { +// GetLans returns the Lans field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetLans() *[]KubernetesNodePoolLan { if o == nil { return nil } - return o.K8sVersion + return o.Lans } -// GetK8sVersionOk returns a tuple with the K8sVersion field value +// GetLansOk returns a tuple with the Lans field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetK8sVersionOk() (*string, bool) { +func (o *KubernetesNodePoolProperties) GetLansOk() (*[]KubernetesNodePoolLan, bool) { if o == nil { return nil, false } - return o.K8sVersion, true + return o.Lans, true } -// SetK8sVersion sets field value -func (o *KubernetesNodePoolProperties) SetK8sVersion(v string) { +// SetLans sets field value +func (o *KubernetesNodePoolProperties) SetLans(v []KubernetesNodePoolLan) { - o.K8sVersion = &v + o.Lans = &v } -// HasK8sVersion returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasK8sVersion() bool { - if o != nil && o.K8sVersion != nil { +// HasLans returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolProperties) HasLans() bool { + if o != nil && o.Lans != nil { return true } @@ -459,7 +459,7 @@ func (o *KubernetesNodePoolProperties) HasK8sVersion() bool { } // GetMaintenanceWindow returns the MaintenanceWindow field value -// If the value is explicit nil, the zero value for KubernetesMaintenanceWindow will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesNodePoolProperties) GetMaintenanceWindow() *KubernetesMaintenanceWindow { if o == nil { return nil @@ -496,228 +496,228 @@ func (o *KubernetesNodePoolProperties) HasMaintenanceWindow() bool { return false } -// GetAutoScaling returns the AutoScaling field value -// If the value is explicit nil, the zero value for KubernetesAutoScaling will be returned -func (o *KubernetesNodePoolProperties) GetAutoScaling() *KubernetesAutoScaling { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetName() *string { if o == nil { return nil } - return o.AutoScaling + return o.Name } -// GetAutoScalingOk returns a tuple with the AutoScaling field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetAutoScalingOk() (*KubernetesAutoScaling, bool) { +func (o *KubernetesNodePoolProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.AutoScaling, true + return o.Name, true } -// SetAutoScaling sets field value -func (o *KubernetesNodePoolProperties) SetAutoScaling(v KubernetesAutoScaling) { +// SetName sets field value +func (o *KubernetesNodePoolProperties) SetName(v string) { - o.AutoScaling = &v + o.Name = &v } -// HasAutoScaling returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasAutoScaling() bool { - if o != nil && o.AutoScaling != nil { +// HasName returns a boolean if a field has been set. +func (o *KubernetesNodePoolProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetLans returns the Lans field value -// If the value is explicit nil, the zero value for []KubernetesNodePoolLan will be returned -func (o *KubernetesNodePoolProperties) GetLans() *[]KubernetesNodePoolLan { +// GetNodeCount returns the NodeCount field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetNodeCount() *int32 { if o == nil { return nil } - return o.Lans + return o.NodeCount } -// GetLansOk returns a tuple with the Lans field value +// GetNodeCountOk returns a tuple with the NodeCount field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetLansOk() (*[]KubernetesNodePoolLan, bool) { +func (o *KubernetesNodePoolProperties) GetNodeCountOk() (*int32, bool) { if o == nil { return nil, false } - return o.Lans, true + return o.NodeCount, true } -// SetLans sets field value -func (o *KubernetesNodePoolProperties) SetLans(v []KubernetesNodePoolLan) { +// SetNodeCount sets field value +func (o *KubernetesNodePoolProperties) SetNodeCount(v int32) { - o.Lans = &v + o.NodeCount = &v } -// HasLans returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasLans() bool { - if o != nil && o.Lans != nil { +// HasNodeCount returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolProperties) HasNodeCount() bool { + if o != nil && o.NodeCount != nil { return true } return false } -// GetLabels returns the Labels field value -// If the value is explicit nil, the zero value for map[string]string will be returned -func (o *KubernetesNodePoolProperties) GetLabels() *map[string]string { +// GetPublicIps returns the PublicIps field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetPublicIps() *[]string { if o == nil { return nil } - return o.Labels + return o.PublicIps } -// GetLabelsOk returns a tuple with the Labels field value +// GetPublicIpsOk returns a tuple with the PublicIps field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetLabelsOk() (*map[string]string, bool) { +func (o *KubernetesNodePoolProperties) GetPublicIpsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Labels, true + return o.PublicIps, true } -// SetLabels sets field value -func (o *KubernetesNodePoolProperties) SetLabels(v map[string]string) { +// SetPublicIps sets field value +func (o *KubernetesNodePoolProperties) SetPublicIps(v []string) { - o.Labels = &v + o.PublicIps = &v } -// HasLabels returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasLabels() bool { - if o != nil && o.Labels != nil { +// HasPublicIps returns a boolean if a field has been set. +func (o *KubernetesNodePoolProperties) HasPublicIps() bool { + if o != nil && o.PublicIps != nil { return true } return false } -// GetAnnotations returns the Annotations field value -// If the value is explicit nil, the zero value for map[string]string will be returned -func (o *KubernetesNodePoolProperties) GetAnnotations() *map[string]string { +// GetRamSize returns the RamSize field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetRamSize() *int32 { if o == nil { return nil } - return o.Annotations + return o.RamSize } -// GetAnnotationsOk returns a tuple with the Annotations field value +// GetRamSizeOk returns a tuple with the RamSize field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetAnnotationsOk() (*map[string]string, bool) { +func (o *KubernetesNodePoolProperties) GetRamSizeOk() (*int32, bool) { if o == nil { return nil, false } - return o.Annotations, true + return o.RamSize, true } -// SetAnnotations sets field value -func (o *KubernetesNodePoolProperties) SetAnnotations(v map[string]string) { +// SetRamSize sets field value +func (o *KubernetesNodePoolProperties) SetRamSize(v int32) { - o.Annotations = &v + o.RamSize = &v } -// HasAnnotations returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasAnnotations() bool { - if o != nil && o.Annotations != nil { +// HasRamSize returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolProperties) HasRamSize() bool { + if o != nil && o.RamSize != nil { return true } return false } -// GetPublicIps returns the PublicIps field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *KubernetesNodePoolProperties) GetPublicIps() *[]string { +// GetStorageSize returns the StorageSize field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetStorageSize() *int32 { if o == nil { return nil } - return o.PublicIps + return o.StorageSize } -// GetPublicIpsOk returns a tuple with the PublicIps field value +// GetStorageSizeOk returns a tuple with the StorageSize field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetPublicIpsOk() (*[]string, bool) { +func (o *KubernetesNodePoolProperties) GetStorageSizeOk() (*int32, bool) { if o == nil { return nil, false } - return o.PublicIps, true + return o.StorageSize, true } -// SetPublicIps sets field value -func (o *KubernetesNodePoolProperties) SetPublicIps(v []string) { +// SetStorageSize sets field value +func (o *KubernetesNodePoolProperties) SetStorageSize(v int32) { - o.PublicIps = &v + o.StorageSize = &v } -// HasPublicIps returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasPublicIps() bool { - if o != nil && o.PublicIps != nil { +// HasStorageSize returns a boolean if a field has been set. +func (o *KubernetesNodePoolProperties) HasStorageSize() bool { + if o != nil && o.StorageSize != nil { return true } return false } -// GetAvailableUpgradeVersions returns the AvailableUpgradeVersions field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *KubernetesNodePoolProperties) GetAvailableUpgradeVersions() *[]string { +// GetStorageType returns the StorageType field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolProperties) GetStorageType() *string { if o == nil { return nil } - return o.AvailableUpgradeVersions + return o.StorageType } -// GetAvailableUpgradeVersionsOk returns a tuple with the AvailableUpgradeVersions field value +// GetStorageTypeOk returns a tuple with the StorageType field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolProperties) GetAvailableUpgradeVersionsOk() (*[]string, bool) { +func (o *KubernetesNodePoolProperties) GetStorageTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.AvailableUpgradeVersions, true + return o.StorageType, true } -// SetAvailableUpgradeVersions sets field value -func (o *KubernetesNodePoolProperties) SetAvailableUpgradeVersions(v []string) { +// SetStorageType sets field value +func (o *KubernetesNodePoolProperties) SetStorageType(v string) { - o.AvailableUpgradeVersions = &v + o.StorageType = &v } -// HasAvailableUpgradeVersions returns a boolean if a field has been set. -func (o *KubernetesNodePoolProperties) HasAvailableUpgradeVersions() bool { - if o != nil && o.AvailableUpgradeVersions != nil { +// HasStorageType returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolProperties) HasStorageType() bool { + if o != nil && o.StorageType != nil { return true } @@ -726,57 +726,74 @@ func (o *KubernetesNodePoolProperties) HasAvailableUpgradeVersions() bool { func (o KubernetesNodePoolProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.Annotations != nil { + toSerialize["annotations"] = o.Annotations } - if o.DatacenterId != nil { - toSerialize["datacenterId"] = o.DatacenterId + + if o.AutoScaling != nil { + toSerialize["autoScaling"] = o.AutoScaling } - if o.NodeCount != nil { - toSerialize["nodeCount"] = o.NodeCount + + if o.AvailabilityZone != nil { + toSerialize["availabilityZone"] = o.AvailabilityZone } - if o.CpuFamily != nil { - toSerialize["cpuFamily"] = o.CpuFamily + + if o.AvailableUpgradeVersions != nil { + toSerialize["availableUpgradeVersions"] = o.AvailableUpgradeVersions } + if o.CoresCount != nil { toSerialize["coresCount"] = o.CoresCount } - if o.RamSize != nil { - toSerialize["ramSize"] = o.RamSize - } - if o.AvailabilityZone != nil { - toSerialize["availabilityZone"] = o.AvailabilityZone - } - if o.StorageType != nil { - toSerialize["storageType"] = o.StorageType + + if o.CpuFamily != nil { + toSerialize["cpuFamily"] = o.CpuFamily } - if o.StorageSize != nil { - toSerialize["storageSize"] = o.StorageSize + + if o.DatacenterId != nil { + toSerialize["datacenterId"] = o.DatacenterId } + if o.K8sVersion != nil { toSerialize["k8sVersion"] = o.K8sVersion } - if o.MaintenanceWindow != nil { - toSerialize["maintenanceWindow"] = o.MaintenanceWindow - } - if o.AutoScaling != nil { - toSerialize["autoScaling"] = o.AutoScaling + + if o.Labels != nil { + toSerialize["labels"] = o.Labels } + if o.Lans != nil { toSerialize["lans"] = o.Lans } - if o.Labels != nil { - toSerialize["labels"] = o.Labels + + if o.MaintenanceWindow != nil { + toSerialize["maintenanceWindow"] = o.MaintenanceWindow } - if o.Annotations != nil { - toSerialize["annotations"] = o.Annotations + + if o.Name != nil { + toSerialize["name"] = o.Name + } + + if o.NodeCount != nil { + toSerialize["nodeCount"] = o.NodeCount } + if o.PublicIps != nil { toSerialize["publicIps"] = o.PublicIps } - if o.AvailableUpgradeVersions != nil { - toSerialize["availableUpgradeVersions"] = o.AvailableUpgradeVersions + + if o.RamSize != nil { + toSerialize["ramSize"] = o.RamSize } + + if o.StorageSize != nil { + toSerialize["storageSize"] = o.StorageSize + } + + if o.StorageType != nil { + toSerialize["storageType"] = o.StorageType + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_post.go index dff10d338..c55191cfa 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_post.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_post.go @@ -16,54 +16,54 @@ import ( // KubernetesNodePoolPropertiesForPost struct for KubernetesNodePoolPropertiesForPost type KubernetesNodePoolPropertiesForPost struct { - // A Kubernetes node pool name. Valid Kubernetes node pool name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. - Name *string `json:"name"` + // The annotations attached to the node pool. 
+ Annotations *map[string]string `json:"annotations,omitempty"` + AutoScaling *KubernetesAutoScaling `json:"autoScaling,omitempty"` + // The availability zone in which the target VM should be provisioned. + AvailabilityZone *string `json:"availabilityZone"` + // The total number of cores for the nodes. + CoresCount *int32 `json:"coresCount"` + // The CPU type for the nodes. + CpuFamily *string `json:"cpuFamily"` // The unique identifier of the VDC where the worker nodes of the node pool are provisioned.Note that the data center is located in the exact place where the parent cluster of the node pool is located. DatacenterId *string `json:"datacenterId"` + // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. + K8sVersion *string `json:"k8sVersion,omitempty"` + // The labels attached to the node pool. + Labels *map[string]string `json:"labels,omitempty"` + // The array of existing private LANs to attach to worker nodes. + Lans *[]KubernetesNodePoolLan `json:"lans,omitempty"` + MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` + // A Kubernetes node pool name. Valid Kubernetes node pool name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. + Name *string `json:"name"` // The number of worker nodes of the node pool. NodeCount *int32 `json:"nodeCount"` - // The CPU type for the nodes. - CpuFamily *string `json:"cpuFamily"` - // The total number of cores for the nodes. - CoresCount *int32 `json:"coresCount"` + // Optional array of reserved public IP addresses to be used by the nodes. The IPs must be from the exact location of the node pool's data center. If autoscaling is used, the array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for a fixed number of nodes or maxNodeCount+1). The extra IP is used when the nodes are rebuilt. + PublicIps *[]string `json:"publicIps,omitempty"` // The RAM size for the nodes. Must be specified in multiples of 1024 MB, with a minimum size of 2048 MB. RamSize *int32 `json:"ramSize"` - // The availability zone in which the target VM should be provisioned. - AvailabilityZone *string `json:"availabilityZone"` - // The storage type for the nodes. - StorageType *string `json:"storageType"` // The allocated volume size in GB. The allocated volume size in GB. To achieve good performance, we recommend a size greater than 100GB for SSD. StorageSize *int32 `json:"storageSize"` - // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. - K8sVersion *string `json:"k8sVersion,omitempty"` - MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` - AutoScaling *KubernetesAutoScaling `json:"autoScaling,omitempty"` - // The array of existing private LANs to attach to worker nodes. - Lans *[]KubernetesNodePoolLan `json:"lans,omitempty"` - // The labels attached to the node pool. - Labels *map[string]string `json:"labels,omitempty"` - // The annotations attached to the node pool. 
- Annotations *map[string]string `json:"annotations,omitempty"` - // Optional array of reserved public IP addresses to be used by the nodes. The IPs must be from the exact location of the node pool's data center. If autoscaling is used, the array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for a fixed number of nodes or maxNodeCount+1). The extra IP is used when the nodes are rebuilt. - PublicIps *[]string `json:"publicIps,omitempty"` + // The storage type for the nodes. + StorageType *string `json:"storageType"` } // NewKubernetesNodePoolPropertiesForPost instantiates a new KubernetesNodePoolPropertiesForPost object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewKubernetesNodePoolPropertiesForPost(name string, datacenterId string, nodeCount int32, cpuFamily string, coresCount int32, ramSize int32, availabilityZone string, storageType string, storageSize int32) *KubernetesNodePoolPropertiesForPost { +func NewKubernetesNodePoolPropertiesForPost(availabilityZone string, coresCount int32, cpuFamily string, datacenterId string, name string, nodeCount int32, ramSize int32, storageSize int32, storageType string) *KubernetesNodePoolPropertiesForPost { this := KubernetesNodePoolPropertiesForPost{} - this.Name = &name + this.AvailabilityZone = &availabilityZone + this.CoresCount = &coresCount + this.CpuFamily = &cpuFamily this.DatacenterId = &datacenterId + this.Name = &name this.NodeCount = &nodeCount - this.CpuFamily = &cpuFamily - this.CoresCount = &coresCount this.RamSize = &ramSize - this.AvailabilityZone = &availabilityZone - this.StorageType = &storageType this.StorageSize = &storageSize + this.StorageType = &storageType return &this } @@ -76,608 +76,608 @@ func NewKubernetesNodePoolPropertiesForPostWithDefaults() *KubernetesNodePoolPro return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetName() *string { +// GetAnnotations returns the Annotations field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetAnnotations() *map[string]string { if o == nil { return nil } - return o.Name + return o.Annotations } -// GetNameOk returns a tuple with the Name field value +// GetAnnotationsOk returns a tuple with the Annotations field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetNameOk() (*string, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetAnnotationsOk() (*map[string]string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Annotations, true } -// SetName sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetName(v string) { +// SetAnnotations sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetAnnotations(v map[string]string) { - o.Name = &v + o.Annotations = &v } -// HasName returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasName() bool { - if o != nil && o.Name != nil { +// HasAnnotations returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPost) HasAnnotations() bool { + if o != nil && o.Annotations != nil { return true } return false } -// GetDatacenterId returns the DatacenterId field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetDatacenterId() *string { +// GetAutoScaling returns the AutoScaling field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetAutoScaling() *KubernetesAutoScaling { if o == nil { return nil } - return o.DatacenterId + return o.AutoScaling } -// GetDatacenterIdOk returns a tuple with the DatacenterId field value +// GetAutoScalingOk returns a tuple with the AutoScaling field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetDatacenterIdOk() (*string, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetAutoScalingOk() (*KubernetesAutoScaling, bool) { if o == nil { return nil, false } - return o.DatacenterId, true + return o.AutoScaling, true } -// SetDatacenterId sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetDatacenterId(v string) { +// SetAutoScaling sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetAutoScaling(v KubernetesAutoScaling) { - o.DatacenterId = &v + o.AutoScaling = &v } -// HasDatacenterId returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasDatacenterId() bool { - if o != nil && o.DatacenterId != nil { +// HasAutoScaling returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPost) HasAutoScaling() bool { + if o != nil && o.AutoScaling != nil { return true } return false } -// GetNodeCount returns the NodeCount field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetNodeCount() *int32 { +// GetAvailabilityZone returns the AvailabilityZone field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetAvailabilityZone() *string { if o == nil { return nil } - return o.NodeCount + return o.AvailabilityZone } -// GetNodeCountOk returns a tuple with the NodeCount field value +// GetAvailabilityZoneOk returns a tuple with the AvailabilityZone field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetNodeCountOk() (*int32, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetAvailabilityZoneOk() (*string, bool) { if o == nil { return nil, false } - return o.NodeCount, true + return o.AvailabilityZone, true } -// SetNodeCount sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetNodeCount(v int32) { +// SetAvailabilityZone sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetAvailabilityZone(v string) { - o.NodeCount = &v + o.AvailabilityZone = &v } -// HasNodeCount returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasNodeCount() bool { - if o != nil && o.NodeCount != nil { +// HasAvailabilityZone returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPost) HasAvailabilityZone() bool { + if o != nil && o.AvailabilityZone != nil { return true } return false } -// GetCpuFamily returns the CpuFamily field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetCpuFamily() *string { +// GetCoresCount returns the CoresCount field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetCoresCount() *int32 { if o == nil { return nil } - return o.CpuFamily + return o.CoresCount } -// GetCpuFamilyOk returns a tuple with the CpuFamily field value +// GetCoresCountOk returns a tuple with the CoresCount field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetCpuFamilyOk() (*string, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetCoresCountOk() (*int32, bool) { if o == nil { return nil, false } - return o.CpuFamily, true + return o.CoresCount, true } -// SetCpuFamily sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetCpuFamily(v string) { +// SetCoresCount sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetCoresCount(v int32) { - o.CpuFamily = &v + o.CoresCount = &v } -// HasCpuFamily returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasCpuFamily() bool { - if o != nil && o.CpuFamily != nil { +// HasCoresCount returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPost) HasCoresCount() bool { + if o != nil && o.CoresCount != nil { return true } return false } -// GetCoresCount returns the CoresCount field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetCoresCount() *int32 { +// GetCpuFamily returns the CpuFamily field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetCpuFamily() *string { if o == nil { return nil } - return o.CoresCount + return o.CpuFamily } -// GetCoresCountOk returns a tuple with the CoresCount field value +// GetCpuFamilyOk returns a tuple with the CpuFamily field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetCoresCountOk() (*int32, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetCpuFamilyOk() (*string, bool) { if o == nil { return nil, false } - return o.CoresCount, true + return o.CpuFamily, true } -// SetCoresCount sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetCoresCount(v int32) { +// SetCpuFamily sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetCpuFamily(v string) { - o.CoresCount = &v + o.CpuFamily = &v } -// HasCoresCount returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasCoresCount() bool { - if o != nil && o.CoresCount != nil { +// HasCpuFamily returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPost) HasCpuFamily() bool { + if o != nil && o.CpuFamily != nil { return true } return false } -// GetRamSize returns the RamSize field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetRamSize() *int32 { +// GetDatacenterId returns the DatacenterId field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetDatacenterId() *string { if o == nil { return nil } - return o.RamSize + return o.DatacenterId } -// GetRamSizeOk returns a tuple with the RamSize field value +// GetDatacenterIdOk returns a tuple with the DatacenterId field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetRamSizeOk() (*int32, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetDatacenterIdOk() (*string, bool) { if o == nil { return nil, false } - return o.RamSize, true + return o.DatacenterId, true } -// SetRamSize sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetRamSize(v int32) { +// SetDatacenterId sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetDatacenterId(v string) { - o.RamSize = &v + o.DatacenterId = &v } -// HasRamSize returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasRamSize() bool { - if o != nil && o.RamSize != nil { +// HasDatacenterId returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPost) HasDatacenterId() bool { + if o != nil && o.DatacenterId != nil { return true } return false } -// GetAvailabilityZone returns the AvailabilityZone field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetAvailabilityZone() *string { +// GetK8sVersion returns the K8sVersion field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetK8sVersion() *string { if o == nil { return nil } - return o.AvailabilityZone + return o.K8sVersion } -// GetAvailabilityZoneOk returns a tuple with the AvailabilityZone field value +// GetK8sVersionOk returns a tuple with the K8sVersion field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetAvailabilityZoneOk() (*string, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetK8sVersionOk() (*string, bool) { if o == nil { return nil, false } - return o.AvailabilityZone, true + return o.K8sVersion, true } -// SetAvailabilityZone sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetAvailabilityZone(v string) { +// SetK8sVersion sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetK8sVersion(v string) { - o.AvailabilityZone = &v + o.K8sVersion = &v } -// HasAvailabilityZone returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasAvailabilityZone() bool { - if o != nil && o.AvailabilityZone != nil { +// HasK8sVersion returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPost) HasK8sVersion() bool { + if o != nil && o.K8sVersion != nil { return true } return false } -// GetStorageType returns the StorageType field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetStorageType() *string { +// GetLabels returns the Labels field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetLabels() *map[string]string { if o == nil { return nil } - return o.StorageType + return o.Labels } -// GetStorageTypeOk returns a tuple with the StorageType field value +// GetLabelsOk returns a tuple with the Labels field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetStorageTypeOk() (*string, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetLabelsOk() (*map[string]string, bool) { if o == nil { return nil, false } - return o.StorageType, true + return o.Labels, true } -// SetStorageType sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetStorageType(v string) { +// SetLabels sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetLabels(v map[string]string) { - o.StorageType = &v + o.Labels = &v } -// HasStorageType returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasStorageType() bool { - if o != nil && o.StorageType != nil { +// HasLabels returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPost) HasLabels() bool { + if o != nil && o.Labels != nil { return true } return false } -// GetStorageSize returns the StorageSize field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetStorageSize() *int32 { +// GetLans returns the Lans field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetLans() *[]KubernetesNodePoolLan { if o == nil { return nil } - return o.StorageSize + return o.Lans } -// GetStorageSizeOk returns a tuple with the StorageSize field value +// GetLansOk returns a tuple with the Lans field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetStorageSizeOk() (*int32, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetLansOk() (*[]KubernetesNodePoolLan, bool) { if o == nil { return nil, false } - return o.StorageSize, true + return o.Lans, true } -// SetStorageSize sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetStorageSize(v int32) { +// SetLans sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetLans(v []KubernetesNodePoolLan) { - o.StorageSize = &v + o.Lans = &v } -// HasStorageSize returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasStorageSize() bool { - if o != nil && o.StorageSize != nil { +// HasLans returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPost) HasLans() bool { + if o != nil && o.Lans != nil { return true } return false } -// GetK8sVersion returns the K8sVersion field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetK8sVersion() *string { +// GetMaintenanceWindow returns the MaintenanceWindow field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetMaintenanceWindow() *KubernetesMaintenanceWindow { if o == nil { return nil } - return o.K8sVersion + return o.MaintenanceWindow } -// GetK8sVersionOk returns a tuple with the K8sVersion field value +// GetMaintenanceWindowOk returns a tuple with the MaintenanceWindow field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetK8sVersionOk() (*string, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetMaintenanceWindowOk() (*KubernetesMaintenanceWindow, bool) { if o == nil { return nil, false } - return o.K8sVersion, true + return o.MaintenanceWindow, true } -// SetK8sVersion sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetK8sVersion(v string) { +// SetMaintenanceWindow sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetMaintenanceWindow(v KubernetesMaintenanceWindow) { - o.K8sVersion = &v + o.MaintenanceWindow = &v } -// HasK8sVersion returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasK8sVersion() bool { - if o != nil && o.K8sVersion != nil { +// HasMaintenanceWindow returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPost) HasMaintenanceWindow() bool { + if o != nil && o.MaintenanceWindow != nil { return true } return false } -// GetMaintenanceWindow returns the MaintenanceWindow field value -// If the value is explicit nil, the zero value for KubernetesMaintenanceWindow will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetMaintenanceWindow() *KubernetesMaintenanceWindow { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetName() *string { if o == nil { return nil } - return o.MaintenanceWindow + return o.Name } -// GetMaintenanceWindowOk returns a tuple with the MaintenanceWindow field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetMaintenanceWindowOk() (*KubernetesMaintenanceWindow, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.MaintenanceWindow, true + return o.Name, true } -// SetMaintenanceWindow sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetMaintenanceWindow(v KubernetesMaintenanceWindow) { +// SetName sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetName(v string) { - o.MaintenanceWindow = &v + o.Name = &v } -// HasMaintenanceWindow returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasMaintenanceWindow() bool { - if o != nil && o.MaintenanceWindow != nil { +// HasName returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPost) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetAutoScaling returns the AutoScaling field value -// If the value is explicit nil, the zero value for KubernetesAutoScaling will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetAutoScaling() *KubernetesAutoScaling { +// GetNodeCount returns the NodeCount field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetNodeCount() *int32 { if o == nil { return nil } - return o.AutoScaling + return o.NodeCount } -// GetAutoScalingOk returns a tuple with the AutoScaling field value +// GetNodeCountOk returns a tuple with the NodeCount field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetAutoScalingOk() (*KubernetesAutoScaling, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetNodeCountOk() (*int32, bool) { if o == nil { return nil, false } - return o.AutoScaling, true + return o.NodeCount, true } -// SetAutoScaling sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetAutoScaling(v KubernetesAutoScaling) { +// SetNodeCount sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetNodeCount(v int32) { - o.AutoScaling = &v + o.NodeCount = &v } -// HasAutoScaling returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasAutoScaling() bool { - if o != nil && o.AutoScaling != nil { +// HasNodeCount returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPost) HasNodeCount() bool { + if o != nil && o.NodeCount != nil { return true } return false } -// GetLans returns the Lans field value -// If the value is explicit nil, the zero value for []KubernetesNodePoolLan will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetLans() *[]KubernetesNodePoolLan { +// GetPublicIps returns the PublicIps field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetPublicIps() *[]string { if o == nil { return nil } - return o.Lans + return o.PublicIps } -// GetLansOk returns a tuple with the Lans field value +// GetPublicIpsOk returns a tuple with the PublicIps field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetLansOk() (*[]KubernetesNodePoolLan, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetPublicIpsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Lans, true + return o.PublicIps, true } -// SetLans sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetLans(v []KubernetesNodePoolLan) { +// SetPublicIps sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetPublicIps(v []string) { - o.Lans = &v + o.PublicIps = &v } -// HasLans returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasLans() bool { - if o != nil && o.Lans != nil { +// HasPublicIps returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPost) HasPublicIps() bool { + if o != nil && o.PublicIps != nil { return true } return false } -// GetLabels returns the Labels field value -// If the value is explicit nil, the zero value for map[string]string will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetLabels() *map[string]string { +// GetRamSize returns the RamSize field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetRamSize() *int32 { if o == nil { return nil } - return o.Labels + return o.RamSize } -// GetLabelsOk returns a tuple with the Labels field value +// GetRamSizeOk returns a tuple with the RamSize field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetLabelsOk() (*map[string]string, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetRamSizeOk() (*int32, bool) { if o == nil { return nil, false } - return o.Labels, true + return o.RamSize, true } -// SetLabels sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetLabels(v map[string]string) { +// SetRamSize sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetRamSize(v int32) { - o.Labels = &v + o.RamSize = &v } -// HasLabels returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasLabels() bool { - if o != nil && o.Labels != nil { +// HasRamSize returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPost) HasRamSize() bool { + if o != nil && o.RamSize != nil { return true } return false } -// GetAnnotations returns the Annotations field value -// If the value is explicit nil, the zero value for map[string]string will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetAnnotations() *map[string]string { +// GetStorageSize returns the StorageSize field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetStorageSize() *int32 { if o == nil { return nil } - return o.Annotations + return o.StorageSize } -// GetAnnotationsOk returns a tuple with the Annotations field value +// GetStorageSizeOk returns a tuple with the StorageSize field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetAnnotationsOk() (*map[string]string, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetStorageSizeOk() (*int32, bool) { if o == nil { return nil, false } - return o.Annotations, true + return o.StorageSize, true } -// SetAnnotations sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetAnnotations(v map[string]string) { +// SetStorageSize sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetStorageSize(v int32) { - o.Annotations = &v + o.StorageSize = &v } -// HasAnnotations returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasAnnotations() bool { - if o != nil && o.Annotations != nil { +// HasStorageSize returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPost) HasStorageSize() bool { + if o != nil && o.StorageSize != nil { return true } return false } -// GetPublicIps returns the PublicIps field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetPublicIps() *[]string { +// GetStorageType returns the StorageType field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPost) GetStorageType() *string { if o == nil { return nil } - return o.PublicIps + return o.StorageType } -// GetPublicIpsOk returns a tuple with the PublicIps field value +// GetStorageTypeOk returns a tuple with the StorageType field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPost) GetPublicIpsOk() (*[]string, bool) { +func (o *KubernetesNodePoolPropertiesForPost) GetStorageTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.PublicIps, true + return o.StorageType, true } -// SetPublicIps sets field value -func (o *KubernetesNodePoolPropertiesForPost) SetPublicIps(v []string) { +// SetStorageType sets field value +func (o *KubernetesNodePoolPropertiesForPost) SetStorageType(v string) { - o.PublicIps = &v + o.StorageType = &v } -// HasPublicIps returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPost) HasPublicIps() bool { - if o != nil && o.PublicIps != nil { +// HasStorageType returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPost) HasStorageType() bool { + if o != nil && o.StorageType != nil { return true } @@ -686,54 +686,70 @@ func (o *KubernetesNodePoolPropertiesForPost) HasPublicIps() bool { func (o KubernetesNodePoolPropertiesForPost) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } - if o.DatacenterId != nil { - toSerialize["datacenterId"] = o.DatacenterId + if o.Annotations != nil { + toSerialize["annotations"] = o.Annotations } - if o.NodeCount != nil { - toSerialize["nodeCount"] = o.NodeCount + + if o.AutoScaling != nil { + toSerialize["autoScaling"] = o.AutoScaling } - if o.CpuFamily != nil { - toSerialize["cpuFamily"] = o.CpuFamily + + if o.AvailabilityZone != nil { + toSerialize["availabilityZone"] = o.AvailabilityZone } + if o.CoresCount != nil { toSerialize["coresCount"] = o.CoresCount } - if o.RamSize != nil { - toSerialize["ramSize"] = o.RamSize - } - if o.AvailabilityZone != nil { - toSerialize["availabilityZone"] = o.AvailabilityZone - } - if o.StorageType != nil { - toSerialize["storageType"] = o.StorageType + + if o.CpuFamily != nil { + toSerialize["cpuFamily"] = o.CpuFamily } - if o.StorageSize != nil { - toSerialize["storageSize"] = o.StorageSize + + if o.DatacenterId != nil { + toSerialize["datacenterId"] = o.DatacenterId } + if o.K8sVersion != nil { toSerialize["k8sVersion"] = o.K8sVersion } - if o.MaintenanceWindow != nil { - toSerialize["maintenanceWindow"] = o.MaintenanceWindow - } - if o.AutoScaling != nil { - toSerialize["autoScaling"] = o.AutoScaling + + if o.Labels != nil { + toSerialize["labels"] = o.Labels } + if o.Lans != nil { toSerialize["lans"] = o.Lans } - if o.Labels != nil { - toSerialize["labels"] = o.Labels + + if o.MaintenanceWindow != nil { + toSerialize["maintenanceWindow"] = o.MaintenanceWindow } - if o.Annotations != nil { - toSerialize["annotations"] = o.Annotations + + 
if o.Name != nil { + toSerialize["name"] = o.Name } + + if o.NodeCount != nil { + toSerialize["nodeCount"] = o.NodeCount + } + if o.PublicIps != nil { toSerialize["publicIps"] = o.PublicIps } + + if o.RamSize != nil { + toSerialize["ramSize"] = o.RamSize + } + + if o.StorageSize != nil { + toSerialize["storageSize"] = o.StorageSize + } + + if o.StorageType != nil { + toSerialize["storageType"] = o.StorageType + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_put.go index 88c31def6..3e82ea9c4 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_put.go @@ -16,20 +16,20 @@ import ( // KubernetesNodePoolPropertiesForPut struct for KubernetesNodePoolPropertiesForPut type KubernetesNodePoolPropertiesForPut struct { + // The annotations attached to the node pool. + Annotations *map[string]string `json:"annotations,omitempty"` + AutoScaling *KubernetesAutoScaling `json:"autoScaling,omitempty"` + // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. + K8sVersion *string `json:"k8sVersion,omitempty"` + // The labels attached to the node pool. + Labels *map[string]string `json:"labels,omitempty"` + // The array of existing private LANs to attach to worker nodes. + Lans *[]KubernetesNodePoolLan `json:"lans,omitempty"` + MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` // A Kubernetes node pool name. Valid Kubernetes node pool name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. Name *string `json:"name,omitempty"` // The number of worker nodes of the node pool. NodeCount *int32 `json:"nodeCount"` - // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. - K8sVersion *string `json:"k8sVersion,omitempty"` - MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` - AutoScaling *KubernetesAutoScaling `json:"autoScaling,omitempty"` - // The array of existing private LANs to attach to worker nodes. - Lans *[]KubernetesNodePoolLan `json:"lans,omitempty"` - // The labels attached to the node pool. - Labels *map[string]string `json:"labels,omitempty"` - // The annotations attached to the node pool. - Annotations *map[string]string `json:"annotations,omitempty"` // Optional array of reserved public IP addresses to be used by the nodes. The IPs must be from the exact location of the node pool's data center. If autoscaling is used, the array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for a fixed number of nodes or maxNodeCount+1). The extra IP is used when the nodes are rebuilt. 
PublicIps *[]string `json:"publicIps,omitempty"` } @@ -54,76 +54,76 @@ func NewKubernetesNodePoolPropertiesForPutWithDefaults() *KubernetesNodePoolProp return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetName() *string { +// GetAnnotations returns the Annotations field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPut) GetAnnotations() *map[string]string { if o == nil { return nil } - return o.Name + return o.Annotations } -// GetNameOk returns a tuple with the Name field value +// GetAnnotationsOk returns a tuple with the Annotations field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetNameOk() (*string, bool) { +func (o *KubernetesNodePoolPropertiesForPut) GetAnnotationsOk() (*map[string]string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Annotations, true } -// SetName sets field value -func (o *KubernetesNodePoolPropertiesForPut) SetName(v string) { +// SetAnnotations sets field value +func (o *KubernetesNodePoolPropertiesForPut) SetAnnotations(v map[string]string) { - o.Name = &v + o.Annotations = &v } -// HasName returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPut) HasName() bool { - if o != nil && o.Name != nil { +// HasAnnotations returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPut) HasAnnotations() bool { + if o != nil && o.Annotations != nil { return true } return false } -// GetNodeCount returns the NodeCount field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetNodeCount() *int32 { +// GetAutoScaling returns the AutoScaling field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPut) GetAutoScaling() *KubernetesAutoScaling { if o == nil { return nil } - return o.NodeCount + return o.AutoScaling } -// GetNodeCountOk returns a tuple with the NodeCount field value +// GetAutoScalingOk returns a tuple with the AutoScaling field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetNodeCountOk() (*int32, bool) { +func (o *KubernetesNodePoolPropertiesForPut) GetAutoScalingOk() (*KubernetesAutoScaling, bool) { if o == nil { return nil, false } - return o.NodeCount, true + return o.AutoScaling, true } -// SetNodeCount sets field value -func (o *KubernetesNodePoolPropertiesForPut) SetNodeCount(v int32) { +// SetAutoScaling sets field value +func (o *KubernetesNodePoolPropertiesForPut) SetAutoScaling(v KubernetesAutoScaling) { - o.NodeCount = &v + o.AutoScaling = &v } -// HasNodeCount returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPut) HasNodeCount() bool { - if o != nil && o.NodeCount != nil { +// HasAutoScaling returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPut) HasAutoScaling() bool { + if o != nil && o.AutoScaling != nil { return true } @@ -131,7 +131,7 @@ func (o *KubernetesNodePoolPropertiesForPut) HasNodeCount() bool { } // GetK8sVersion returns the K8sVersion field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesNodePoolPropertiesForPut) GetK8sVersion() *string { if o == nil { return nil @@ -168,190 +168,190 @@ func (o *KubernetesNodePoolPropertiesForPut) HasK8sVersion() bool { return false } -// GetMaintenanceWindow returns the MaintenanceWindow field value -// If the value is explicit nil, the zero value for KubernetesMaintenanceWindow will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetMaintenanceWindow() *KubernetesMaintenanceWindow { +// GetLabels returns the Labels field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPut) GetLabels() *map[string]string { if o == nil { return nil } - return o.MaintenanceWindow + return o.Labels } -// GetMaintenanceWindowOk returns a tuple with the MaintenanceWindow field value +// GetLabelsOk returns a tuple with the Labels field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetMaintenanceWindowOk() (*KubernetesMaintenanceWindow, bool) { +func (o *KubernetesNodePoolPropertiesForPut) GetLabelsOk() (*map[string]string, bool) { if o == nil { return nil, false } - return o.MaintenanceWindow, true + return o.Labels, true } -// SetMaintenanceWindow sets field value -func (o *KubernetesNodePoolPropertiesForPut) SetMaintenanceWindow(v KubernetesMaintenanceWindow) { +// SetLabels sets field value +func (o *KubernetesNodePoolPropertiesForPut) SetLabels(v map[string]string) { - o.MaintenanceWindow = &v + o.Labels = &v } -// HasMaintenanceWindow returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPut) HasMaintenanceWindow() bool { - if o != nil && o.MaintenanceWindow != nil { +// HasLabels returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPut) HasLabels() bool { + if o != nil && o.Labels != nil { return true } return false } -// GetAutoScaling returns the AutoScaling field value -// If the value is explicit nil, the zero value for KubernetesAutoScaling will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetAutoScaling() *KubernetesAutoScaling { +// GetLans returns the Lans field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPut) GetLans() *[]KubernetesNodePoolLan { if o == nil { return nil } - return o.AutoScaling + return o.Lans } -// GetAutoScalingOk returns a tuple with the AutoScaling field value +// GetLansOk returns a tuple with the Lans field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetAutoScalingOk() (*KubernetesAutoScaling, bool) { +func (o *KubernetesNodePoolPropertiesForPut) GetLansOk() (*[]KubernetesNodePoolLan, bool) { if o == nil { return nil, false } - return o.AutoScaling, true + return o.Lans, true } -// SetAutoScaling sets field value -func (o *KubernetesNodePoolPropertiesForPut) SetAutoScaling(v KubernetesAutoScaling) { +// SetLans sets field value +func (o *KubernetesNodePoolPropertiesForPut) SetLans(v []KubernetesNodePoolLan) { - o.AutoScaling = &v + o.Lans = &v } -// HasAutoScaling returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPut) HasAutoScaling() bool { - if o != nil && o.AutoScaling != nil { +// HasLans returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPut) HasLans() bool { + if o != nil && o.Lans != nil { return true } return false } -// GetLans returns the Lans field value -// If the value is explicit nil, the zero value for []KubernetesNodePoolLan will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetLans() *[]KubernetesNodePoolLan { +// GetMaintenanceWindow returns the MaintenanceWindow field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPut) GetMaintenanceWindow() *KubernetesMaintenanceWindow { if o == nil { return nil } - return o.Lans + return o.MaintenanceWindow } -// GetLansOk returns a tuple with the Lans field value +// GetMaintenanceWindowOk returns a tuple with the MaintenanceWindow field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetLansOk() (*[]KubernetesNodePoolLan, bool) { +func (o *KubernetesNodePoolPropertiesForPut) GetMaintenanceWindowOk() (*KubernetesMaintenanceWindow, bool) { if o == nil { return nil, false } - return o.Lans, true + return o.MaintenanceWindow, true } -// SetLans sets field value -func (o *KubernetesNodePoolPropertiesForPut) SetLans(v []KubernetesNodePoolLan) { +// SetMaintenanceWindow sets field value +func (o *KubernetesNodePoolPropertiesForPut) SetMaintenanceWindow(v KubernetesMaintenanceWindow) { - o.Lans = &v + o.MaintenanceWindow = &v } -// HasLans returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPut) HasLans() bool { - if o != nil && o.Lans != nil { +// HasMaintenanceWindow returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPut) HasMaintenanceWindow() bool { + if o != nil && o.MaintenanceWindow != nil { return true } return false } -// GetLabels returns the Labels field value -// If the value is explicit nil, the zero value for map[string]string will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetLabels() *map[string]string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPut) GetName() *string { if o == nil { return nil } - return o.Labels + return o.Name } -// GetLabelsOk returns a tuple with the Labels field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetLabelsOk() (*map[string]string, bool) { +func (o *KubernetesNodePoolPropertiesForPut) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Labels, true + return o.Name, true } -// SetLabels sets field value -func (o *KubernetesNodePoolPropertiesForPut) SetLabels(v map[string]string) { +// SetName sets field value +func (o *KubernetesNodePoolPropertiesForPut) SetName(v string) { - o.Labels = &v + o.Name = &v } -// HasLabels returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPut) HasLabels() bool { - if o != nil && o.Labels != nil { +// HasName returns a boolean if a field has been set. +func (o *KubernetesNodePoolPropertiesForPut) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetAnnotations returns the Annotations field value -// If the value is explicit nil, the zero value for map[string]string will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetAnnotations() *map[string]string { +// GetNodeCount returns the NodeCount field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePoolPropertiesForPut) GetNodeCount() *int32 { if o == nil { return nil } - return o.Annotations + return o.NodeCount } -// GetAnnotationsOk returns a tuple with the Annotations field value +// GetNodeCountOk returns a tuple with the NodeCount field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePoolPropertiesForPut) GetAnnotationsOk() (*map[string]string, bool) { +func (o *KubernetesNodePoolPropertiesForPut) GetNodeCountOk() (*int32, bool) { if o == nil { return nil, false } - return o.Annotations, true + return o.NodeCount, true } -// SetAnnotations sets field value -func (o *KubernetesNodePoolPropertiesForPut) SetAnnotations(v map[string]string) { +// SetNodeCount sets field value +func (o *KubernetesNodePoolPropertiesForPut) SetNodeCount(v int32) { - o.Annotations = &v + o.NodeCount = &v } -// HasAnnotations returns a boolean if a field has been set. -func (o *KubernetesNodePoolPropertiesForPut) HasAnnotations() bool { - if o != nil && o.Annotations != nil { +// HasNodeCount returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolPropertiesForPut) HasNodeCount() bool { + if o != nil && o.NodeCount != nil { return true } @@ -359,7 +359,7 @@ func (o *KubernetesNodePoolPropertiesForPut) HasAnnotations() bool { } // GetPublicIps returns the PublicIps field value -// If the value is explicit nil, the zero value for []string will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesNodePoolPropertiesForPut) GetPublicIps() *[]string { if o == nil { return nil @@ -398,33 +398,42 @@ func (o *KubernetesNodePoolPropertiesForPut) HasPublicIps() bool { func (o KubernetesNodePoolPropertiesForPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.Annotations != nil { + toSerialize["annotations"] = o.Annotations } - if o.NodeCount != nil { - toSerialize["nodeCount"] = o.NodeCount + + if o.AutoScaling != nil { + toSerialize["autoScaling"] = o.AutoScaling } + if o.K8sVersion != nil { toSerialize["k8sVersion"] = o.K8sVersion } - if o.MaintenanceWindow != nil { - toSerialize["maintenanceWindow"] = o.MaintenanceWindow - } - if o.AutoScaling != nil { - toSerialize["autoScaling"] = o.AutoScaling + + if o.Labels != nil { + toSerialize["labels"] = o.Labels } + if o.Lans != nil { toSerialize["lans"] = o.Lans } - if o.Labels != nil { - toSerialize["labels"] = o.Labels + + if o.MaintenanceWindow != nil { + toSerialize["maintenanceWindow"] = o.MaintenanceWindow } - if o.Annotations != nil { - toSerialize["annotations"] = o.Annotations + + if o.Name != nil { + toSerialize["name"] = o.Name } + + if o.NodeCount != nil { + toSerialize["nodeCount"] = o.NodeCount + } + if o.PublicIps != nil { toSerialize["publicIps"] = o.PublicIps } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pools.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pools.go index 61df80e6d..50ebf5568 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pools.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pools.go @@ -16,14 +16,14 @@ import ( // KubernetesNodePools struct for KubernetesNodePools type KubernetesNodePools struct { - // A unique representation of the Kubernetes node pool as a resource collection. - Id *string `json:"id,omitempty"` - // The resource type within a collection. - Type *string `json:"type,omitempty"` // The URL to the collection representation (absolute path). Href *string `json:"href,omitempty"` + // A unique representation of the Kubernetes node pool as a resource collection. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]KubernetesNodePool `json:"items,omitempty"` + // The resource type within a collection. + Type *string `json:"type,omitempty"` } // NewKubernetesNodePools instantiates a new KubernetesNodePools object @@ -44,152 +44,152 @@ func NewKubernetesNodePoolsWithDefaults() *KubernetesNodePools { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePools) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePools) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePools) GetIdOk() (*string, bool) { +func (o *KubernetesNodePools) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *KubernetesNodePools) SetId(v string) { +// SetHref sets field value +func (o *KubernetesNodePools) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesNodePools) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *KubernetesNodePools) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePools) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePools) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePools) GetTypeOk() (*string, bool) { +func (o *KubernetesNodePools) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *KubernetesNodePools) SetType(v string) { +// SetId sets field value +func (o *KubernetesNodePools) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesNodePools) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *KubernetesNodePools) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodePools) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePools) GetItems() *[]KubernetesNodePool { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePools) GetHrefOk() (*string, bool) { +func (o *KubernetesNodePools) GetItemsOk() (*[]KubernetesNodePool, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *KubernetesNodePools) SetHref(v string) { +// SetItems sets field value +func (o *KubernetesNodePools) SetItems(v []KubernetesNodePool) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesNodePools) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. 
+func (o *KubernetesNodePools) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []KubernetesNodePool will be returned -func (o *KubernetesNodePools) GetItems() *[]KubernetesNodePool { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodePools) GetType() *string { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodePools) GetItemsOk() (*[]KubernetesNodePool, bool) { +func (o *KubernetesNodePools) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *KubernetesNodePools) SetItems(v []KubernetesNodePool) { +// SetType sets field value +func (o *KubernetesNodePools) SetType(v string) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *KubernetesNodePools) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *KubernetesNodePools) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *KubernetesNodePools) HasItems() bool { func (o KubernetesNodePools) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_properties.go index 652521484..e5f67c963 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_properties.go @@ -16,25 +16,25 @@ import ( // KubernetesNodeProperties struct for KubernetesNodeProperties type KubernetesNodeProperties struct { + // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. + K8sVersion *string `json:"k8sVersion"` // The Kubernetes node name. Name *string `json:"name"` - // The public IP associated with the node. - PublicIP *string `json:"publicIP,omitempty"` // The private IP associated with the node. PrivateIP *string `json:"privateIP,omitempty"` - // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. - K8sVersion *string `json:"k8sVersion"` + // The public IP associated with the node. 
+ PublicIP *string `json:"publicIP,omitempty"` } // NewKubernetesNodeProperties instantiates a new KubernetesNodeProperties object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewKubernetesNodeProperties(name string, k8sVersion string) *KubernetesNodeProperties { +func NewKubernetesNodeProperties(k8sVersion string, name string) *KubernetesNodeProperties { this := KubernetesNodeProperties{} - this.Name = &name this.K8sVersion = &k8sVersion + this.Name = &name return &this } @@ -47,76 +47,76 @@ func NewKubernetesNodePropertiesWithDefaults() *KubernetesNodeProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodeProperties) GetName() *string { +// GetK8sVersion returns the K8sVersion field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodeProperties) GetK8sVersion() *string { if o == nil { return nil } - return o.Name + return o.K8sVersion } -// GetNameOk returns a tuple with the Name field value +// GetK8sVersionOk returns a tuple with the K8sVersion field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodeProperties) GetNameOk() (*string, bool) { +func (o *KubernetesNodeProperties) GetK8sVersionOk() (*string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.K8sVersion, true } -// SetName sets field value -func (o *KubernetesNodeProperties) SetName(v string) { +// SetK8sVersion sets field value +func (o *KubernetesNodeProperties) SetK8sVersion(v string) { - o.Name = &v + o.K8sVersion = &v } -// HasName returns a boolean if a field has been set. -func (o *KubernetesNodeProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasK8sVersion returns a boolean if a field has been set. +func (o *KubernetesNodeProperties) HasK8sVersion() bool { + if o != nil && o.K8sVersion != nil { return true } return false } -// GetPublicIP returns the PublicIP field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodeProperties) GetPublicIP() *string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodeProperties) GetName() *string { if o == nil { return nil } - return o.PublicIP + return o.Name } -// GetPublicIPOk returns a tuple with the PublicIP field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodeProperties) GetPublicIPOk() (*string, bool) { +func (o *KubernetesNodeProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.PublicIP, true + return o.Name, true } -// SetPublicIP sets field value -func (o *KubernetesNodeProperties) SetPublicIP(v string) { +// SetName sets field value +func (o *KubernetesNodeProperties) SetName(v string) { - o.PublicIP = &v + o.Name = &v } -// HasPublicIP returns a boolean if a field has been set. -func (o *KubernetesNodeProperties) HasPublicIP() bool { - if o != nil && o.PublicIP != nil { +// HasName returns a boolean if a field has been set. 
+func (o *KubernetesNodeProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -124,7 +124,7 @@ func (o *KubernetesNodeProperties) HasPublicIP() bool { } // GetPrivateIP returns the PrivateIP field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *KubernetesNodeProperties) GetPrivateIP() *string { if o == nil { return nil @@ -161,38 +161,38 @@ func (o *KubernetesNodeProperties) HasPrivateIP() bool { return false } -// GetK8sVersion returns the K8sVersion field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodeProperties) GetK8sVersion() *string { +// GetPublicIP returns the PublicIP field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodeProperties) GetPublicIP() *string { if o == nil { return nil } - return o.K8sVersion + return o.PublicIP } -// GetK8sVersionOk returns a tuple with the K8sVersion field value +// GetPublicIPOk returns a tuple with the PublicIP field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodeProperties) GetK8sVersionOk() (*string, bool) { +func (o *KubernetesNodeProperties) GetPublicIPOk() (*string, bool) { if o == nil { return nil, false } - return o.K8sVersion, true + return o.PublicIP, true } -// SetK8sVersion sets field value -func (o *KubernetesNodeProperties) SetK8sVersion(v string) { +// SetPublicIP sets field value +func (o *KubernetesNodeProperties) SetPublicIP(v string) { - o.K8sVersion = &v + o.PublicIP = &v } -// HasK8sVersion returns a boolean if a field has been set. -func (o *KubernetesNodeProperties) HasK8sVersion() bool { - if o != nil && o.K8sVersion != nil { +// HasPublicIP returns a boolean if a field has been set. +func (o *KubernetesNodeProperties) HasPublicIP() bool { + if o != nil && o.PublicIP != nil { return true } @@ -201,18 +201,22 @@ func (o *KubernetesNodeProperties) HasK8sVersion() bool { func (o KubernetesNodeProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + if o.K8sVersion != nil { + toSerialize["k8sVersion"] = o.K8sVersion + } + if o.Name != nil { toSerialize["name"] = o.Name } - if o.PublicIP != nil { - toSerialize["publicIP"] = o.PublicIP - } + if o.PrivateIP != nil { toSerialize["privateIP"] = o.PrivateIP } - if o.K8sVersion != nil { - toSerialize["k8sVersion"] = o.K8sVersion + + if o.PublicIP != nil { + toSerialize["publicIP"] = o.PublicIP } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_nodes.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_nodes.go index e54f0c8d0..a96c03848 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_nodes.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_nodes.go @@ -16,14 +16,14 @@ import ( // KubernetesNodes struct for KubernetesNodes type KubernetesNodes struct { - // A unique representation of the Kubernetes node pool as a resource collection. - Id *string `json:"id,omitempty"` - // The resource type within a collection. - Type *string `json:"type,omitempty"` // The URL to the collection representation (absolute path). Href *string `json:"href,omitempty"` + // A unique representation of the Kubernetes node pool as a resource collection. + Id *string `json:"id,omitempty"` // Array of items in the collection. 
Items *[]KubernetesNode `json:"items,omitempty"` + // The resource type within a collection. + Type *string `json:"type,omitempty"` } // NewKubernetesNodes instantiates a new KubernetesNodes object @@ -44,152 +44,152 @@ func NewKubernetesNodesWithDefaults() *KubernetesNodes { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodes) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodes) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodes) GetIdOk() (*string, bool) { +func (o *KubernetesNodes) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *KubernetesNodes) SetId(v string) { +// SetHref sets field value +func (o *KubernetesNodes) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *KubernetesNodes) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *KubernetesNodes) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodes) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodes) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodes) GetTypeOk() (*string, bool) { +func (o *KubernetesNodes) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *KubernetesNodes) SetType(v string) { +// SetId sets field value +func (o *KubernetesNodes) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *KubernetesNodes) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *KubernetesNodes) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *KubernetesNodes) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodes) GetItems() *[]KubernetesNode { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodes) GetHrefOk() (*string, bool) { +func (o *KubernetesNodes) GetItemsOk() (*[]KubernetesNode, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *KubernetesNodes) SetHref(v string) { +// SetItems sets field value +func (o *KubernetesNodes) SetItems(v []KubernetesNode) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *KubernetesNodes) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *KubernetesNodes) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []KubernetesNode will be returned -func (o *KubernetesNodes) GetItems() *[]KubernetesNode { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *KubernetesNodes) GetType() *string { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *KubernetesNodes) GetItemsOk() (*[]KubernetesNode, bool) { +func (o *KubernetesNodes) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *KubernetesNodes) SetItems(v []KubernetesNode) { +// SetType sets field value +func (o *KubernetesNodes) SetType(v string) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *KubernetesNodes) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *KubernetesNodes) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *KubernetesNodes) HasItems() bool { func (o KubernetesNodes) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label.go index c162e6763..f28f2af59 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label.go @@ -16,14 +16,14 @@ import ( // Label struct for Label type Label struct { - // Label is identified using standard URN. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *string `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // Label is identified using standard URN. + Id *string `json:"id,omitempty"` Metadata *NoStateMetaData `json:"metadata,omitempty"` Properties *LabelProperties `json:"properties"` + // The type of object that has been created. 
+ Type *string `json:"type,omitempty"` } // NewLabel instantiates a new Label object @@ -46,190 +46,190 @@ func NewLabelWithDefaults() *Label { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Label) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Label) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Label) GetIdOk() (*string, bool) { +func (o *Label) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Label) SetId(v string) { +// SetHref sets field value +func (o *Label) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Label) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Label) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Label) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Label) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Label) GetTypeOk() (*string, bool) { +func (o *Label) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Label) SetType(v string) { +// SetId sets field value +func (o *Label) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Label) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Label) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Label) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *Label) GetMetadata() *NoStateMetaData { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Label) GetHrefOk() (*string, bool) { +func (o *Label) GetMetadataOk() (*NoStateMetaData, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *Label) SetHref(v string) { +// SetMetadata sets field value +func (o *Label) SetMetadata(v NoStateMetaData) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *Label) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *Label) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for NoStateMetaData will be returned -func (o *Label) GetMetadata() *NoStateMetaData { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *Label) GetProperties() *LabelProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Label) GetMetadataOk() (*NoStateMetaData, bool) { +func (o *Label) GetPropertiesOk() (*LabelProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *Label) SetMetadata(v NoStateMetaData) { +// SetProperties sets field value +func (o *Label) SetProperties(v LabelProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *Label) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *Label) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for LabelProperties will be returned -func (o *Label) GetProperties() *LabelProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Label) GetType() *string { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Label) GetPropertiesOk() (*LabelProperties, bool) { +func (o *Label) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *Label) SetProperties(v LabelProperties) { +// SetType sets field value +func (o *Label) SetType(v string) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *Label) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Label) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *Label) HasProperties() bool { func (o Label) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_properties.go index a7a0d794c..f561a5fb7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_properties.go @@ -18,14 +18,14 @@ import ( type LabelProperties struct { // A label key Key *string `json:"key,omitempty"` - // A label value - Value *string `json:"value,omitempty"` + // URL to the Resource (absolute path) on which the label is applied. + ResourceHref *string `json:"resourceHref,omitempty"` // The ID of the resource. ResourceId *string `json:"resourceId,omitempty"` // The type of the resource on which the label is applied. ResourceType *string `json:"resourceType,omitempty"` - // URL to the Resource (absolute path) on which the label is applied. - ResourceHref *string `json:"resourceHref,omitempty"` + // A label value + Value *string `json:"value,omitempty"` } // NewLabelProperties instantiates a new LabelProperties object @@ -47,7 +47,7 @@ func NewLabelPropertiesWithDefaults() *LabelProperties { } // GetKey returns the Key field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *LabelProperties) GetKey() *string { if o == nil { return nil @@ -84,38 +84,38 @@ func (o *LabelProperties) HasKey() bool { return false } -// GetValue returns the Value field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LabelProperties) GetValue() *string { +// GetResourceHref returns the ResourceHref field value +// If the value is explicit nil, nil is returned +func (o *LabelProperties) GetResourceHref() *string { if o == nil { return nil } - return o.Value + return o.ResourceHref } -// GetValueOk returns a tuple with the Value field value +// GetResourceHrefOk returns a tuple with the ResourceHref field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelProperties) GetValueOk() (*string, bool) { +func (o *LabelProperties) GetResourceHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Value, true + return o.ResourceHref, true } -// SetValue sets field value -func (o *LabelProperties) SetValue(v string) { +// SetResourceHref sets field value +func (o *LabelProperties) SetResourceHref(v string) { - o.Value = &v + o.ResourceHref = &v } -// HasValue returns a boolean if a field has been set. -func (o *LabelProperties) HasValue() bool { - if o != nil && o.Value != nil { +// HasResourceHref returns a boolean if a field has been set. 
+func (o *LabelProperties) HasResourceHref() bool { + if o != nil && o.ResourceHref != nil { return true } @@ -123,7 +123,7 @@ func (o *LabelProperties) HasValue() bool { } // GetResourceId returns the ResourceId field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *LabelProperties) GetResourceId() *string { if o == nil { return nil @@ -161,7 +161,7 @@ func (o *LabelProperties) HasResourceId() bool { } // GetResourceType returns the ResourceType field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *LabelProperties) GetResourceType() *string { if o == nil { return nil @@ -198,38 +198,38 @@ func (o *LabelProperties) HasResourceType() bool { return false } -// GetResourceHref returns the ResourceHref field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LabelProperties) GetResourceHref() *string { +// GetValue returns the Value field value +// If the value is explicit nil, nil is returned +func (o *LabelProperties) GetValue() *string { if o == nil { return nil } - return o.ResourceHref + return o.Value } -// GetResourceHrefOk returns a tuple with the ResourceHref field value +// GetValueOk returns a tuple with the Value field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelProperties) GetResourceHrefOk() (*string, bool) { +func (o *LabelProperties) GetValueOk() (*string, bool) { if o == nil { return nil, false } - return o.ResourceHref, true + return o.Value, true } -// SetResourceHref sets field value -func (o *LabelProperties) SetResourceHref(v string) { +// SetValue sets field value +func (o *LabelProperties) SetValue(v string) { - o.ResourceHref = &v + o.Value = &v } -// HasResourceHref returns a boolean if a field has been set. -func (o *LabelProperties) HasResourceHref() bool { - if o != nil && o.ResourceHref != nil { +// HasValue returns a boolean if a field has been set. +func (o *LabelProperties) HasValue() bool { + if o != nil && o.Value != nil { return true } @@ -241,18 +241,23 @@ func (o LabelProperties) MarshalJSON() ([]byte, error) { if o.Key != nil { toSerialize["key"] = o.Key } - if o.Value != nil { - toSerialize["value"] = o.Value + + if o.ResourceHref != nil { + toSerialize["resourceHref"] = o.ResourceHref } + if o.ResourceId != nil { toSerialize["resourceId"] = o.ResourceId } + if o.ResourceType != nil { toSerialize["resourceType"] = o.ResourceType } - if o.ResourceHref != nil { - toSerialize["resourceHref"] = o.ResourceHref + + if o.Value != nil { + toSerialize["value"] = o.Value } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resource.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resource.go index 29ebef96d..9963122ac 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resource.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resource.go @@ -16,14 +16,14 @@ import ( // LabelResource struct for LabelResource type LabelResource struct { - // Label on a resource is identified using label key. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *string `json:"type,omitempty"` // URL to the object representation (absolute path). 
- Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // Label on a resource is identified using label key. + Id *string `json:"id,omitempty"` Metadata *NoStateMetaData `json:"metadata,omitempty"` Properties *LabelResourceProperties `json:"properties"` + // The type of object that has been created. + Type *string `json:"type,omitempty"` } // NewLabelResource instantiates a new LabelResource object @@ -46,190 +46,190 @@ func NewLabelResourceWithDefaults() *LabelResource { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LabelResource) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *LabelResource) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResource) GetIdOk() (*string, bool) { +func (o *LabelResource) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *LabelResource) SetId(v string) { +// SetHref sets field value +func (o *LabelResource) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *LabelResource) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *LabelResource) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LabelResource) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *LabelResource) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResource) GetTypeOk() (*string, bool) { +func (o *LabelResource) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *LabelResource) SetType(v string) { +// SetId sets field value +func (o *LabelResource) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *LabelResource) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *LabelResource) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LabelResource) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *LabelResource) GetMetadata() *NoStateMetaData { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResource) GetHrefOk() (*string, bool) { +func (o *LabelResource) GetMetadataOk() (*NoStateMetaData, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *LabelResource) SetHref(v string) { +// SetMetadata sets field value +func (o *LabelResource) SetMetadata(v NoStateMetaData) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *LabelResource) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *LabelResource) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for NoStateMetaData will be returned -func (o *LabelResource) GetMetadata() *NoStateMetaData { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *LabelResource) GetProperties() *LabelResourceProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResource) GetMetadataOk() (*NoStateMetaData, bool) { +func (o *LabelResource) GetPropertiesOk() (*LabelResourceProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *LabelResource) SetMetadata(v NoStateMetaData) { +// SetProperties sets field value +func (o *LabelResource) SetProperties(v LabelResourceProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *LabelResource) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *LabelResource) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for LabelResourceProperties will be returned -func (o *LabelResource) GetProperties() *LabelResourceProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *LabelResource) GetType() *string { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResource) GetPropertiesOk() (*LabelResourceProperties, bool) { +func (o *LabelResource) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *LabelResource) SetProperties(v LabelResourceProperties) { +// SetType sets field value +func (o *LabelResource) SetType(v string) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. 
-func (o *LabelResource) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *LabelResource) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *LabelResource) HasProperties() bool { func (o LabelResource) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resource_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resource_properties.go index ef0836dff..e83bc0c9f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resource_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resource_properties.go @@ -41,7 +41,7 @@ func NewLabelResourcePropertiesWithDefaults() *LabelResourceProperties { } // GetKey returns the Key field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *LabelResourceProperties) GetKey() *string { if o == nil { return nil @@ -79,7 +79,7 @@ func (o *LabelResourceProperties) HasKey() bool { } // GetValue returns the Value field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *LabelResourceProperties) GetValue() *string { if o == nil { return nil @@ -121,9 +121,11 @@ func (o LabelResourceProperties) MarshalJSON() ([]byte, error) { if o.Key != nil { toSerialize["key"] = o.Key } + if o.Value != nil { toSerialize["value"] = o.Value } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resources.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resources.go index 673f4f328..43bf07e05 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resources.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_label_resources.go @@ -16,19 +16,19 @@ import ( // LabelResources struct for LabelResources type LabelResources struct { - // A unique representation of the label as a resource collection. - Id *string `json:"id,omitempty"` - // The type of resource within a collection. - Type *string `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the collection representation (absolute path). Href *string `json:"href,omitempty"` + // A unique representation of the label as a resource collection. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]LabelResource `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of resource within a collection. 
+ Type *string `json:"type,omitempty"` } // NewLabelResources instantiates a new LabelResources object @@ -49,114 +49,114 @@ func NewLabelResourcesWithDefaults() *LabelResources { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LabelResources) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *LabelResources) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResources) GetIdOk() (*string, bool) { +func (o *LabelResources) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *LabelResources) SetId(v string) { +// SetLinks sets field value +func (o *LabelResources) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *LabelResources) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *LabelResources) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LabelResources) GetType() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *LabelResources) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResources) GetTypeOk() (*string, bool) { +func (o *LabelResources) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *LabelResources) SetType(v string) { +// SetHref sets field value +func (o *LabelResources) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *LabelResources) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *LabelResources) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LabelResources) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *LabelResources) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResources) GetHrefOk() (*string, bool) { +func (o *LabelResources) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *LabelResources) SetHref(v string) { +// SetId sets field value +func (o *LabelResources) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *LabelResources) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *LabelResources) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *LabelResources) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []LabelResource will be returned +// If the value is explicit nil, nil is returned func (o *LabelResources) GetItems() *[]LabelResource { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *LabelResources) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *LabelResources) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *LabelResources) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResources) GetOffsetOk() (*float32, bool) { +func (o *LabelResources) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *LabelResources) SetOffset(v float32) { +// SetLimit sets field value +func (o *LabelResources) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *LabelResources) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *LabelResources) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *LabelResources) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *LabelResources) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResources) GetLimitOk() (*float32, bool) { +func (o *LabelResources) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *LabelResources) SetLimit(v float32) { +// SetOffset sets field value +func (o *LabelResources) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. 
-func (o *LabelResources) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *LabelResources) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *LabelResources) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *LabelResources) GetType() *string { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LabelResources) GetLinksOk() (*PaginationLinks, bool) { +func (o *LabelResources) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *LabelResources) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *LabelResources) SetType(v string) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *LabelResources) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *LabelResources) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *LabelResources) HasLinks() bool { func (o LabelResources) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_labels.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_labels.go index d83bdbcfc..f2be7da33 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_labels.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_labels.go @@ -16,14 +16,14 @@ import ( // Labels struct for Labels type Labels struct { - // A unique representation of the label as a resource collection. - Id *string `json:"id,omitempty"` - // The type of resource within a collection. - Type *string `json:"type,omitempty"` // URL to the collection representation (absolute path). Href *string `json:"href,omitempty"` + // A unique representation of the label as a resource collection. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Label `json:"items,omitempty"` + // The type of resource within a collection. 
+ Type *string `json:"type,omitempty"` } // NewLabels instantiates a new Labels object @@ -44,152 +44,152 @@ func NewLabelsWithDefaults() *Labels { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Labels) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Labels) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Labels) GetIdOk() (*string, bool) { +func (o *Labels) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Labels) SetId(v string) { +// SetHref sets field value +func (o *Labels) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Labels) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Labels) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Labels) GetType() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Labels) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Labels) GetTypeOk() (*string, bool) { +func (o *Labels) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Labels) SetType(v string) { +// SetId sets field value +func (o *Labels) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Labels) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Labels) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Labels) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *Labels) GetItems() *[]Label { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Labels) GetHrefOk() (*string, bool) { +func (o *Labels) GetItemsOk() (*[]Label, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *Labels) SetHref(v string) { +// SetItems sets field value +func (o *Labels) SetItems(v []Label) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *Labels) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *Labels) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Label will be returned -func (o *Labels) GetItems() *[]Label { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Labels) GetType() *string { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Labels) GetItemsOk() (*[]Label, bool) { +func (o *Labels) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *Labels) SetItems(v []Label) { +// SetType sets field value +func (o *Labels) SetType(v string) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *Labels) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *Labels) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *Labels) HasItems() bool { func (o Labels) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan.go index 120c2f17f..09a44f4e3 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan.go @@ -16,15 +16,15 @@ import ( // Lan struct for Lan type Lan struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Entities *LanEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *LanProperties `json:"properties"` - Entities *LanEntities `json:"entities,omitempty"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewLan instantiates a new Lan object @@ -47,114 +47,114 @@ func NewLanWithDefaults() *Lan { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Lan) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *Lan) GetEntities() *LanEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lan) GetIdOk() (*string, bool) { +func (o *Lan) GetEntitiesOk() (*LanEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *Lan) SetId(v string) { +// SetEntities sets field value +func (o *Lan) SetEntities(v LanEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *Lan) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *Lan) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Lan) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Lan) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lan) GetTypeOk() (*Type, bool) { +func (o *Lan) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Lan) SetType(v Type) { +// SetHref sets field value +func (o *Lan) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Lan) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Lan) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Lan) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Lan) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lan) GetHrefOk() (*string, bool) { +func (o *Lan) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Lan) SetHref(v string) { +// SetId sets field value +func (o *Lan) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *Lan) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Lan) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *Lan) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *Lan) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *Lan) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for LanProperties will be returned +// If the value is explicit nil, nil is returned func (o *Lan) GetProperties() *LanProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *Lan) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for LanEntities will be returned -func (o *Lan) GetEntities() *LanEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Lan) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lan) GetEntitiesOk() (*LanEntities, bool) { +func (o *Lan) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *Lan) SetEntities(v LanEntities) { +// SetType sets field value +func (o *Lan) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *Lan) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Lan) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *Lan) HasEntities() bool { func (o Lan) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_entities.go index f4d242ac8..368746a43 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_entities.go @@ -38,7 +38,7 @@ func NewLanEntitiesWithDefaults() *LanEntities { } // GetNics returns the Nics field value -// If the value is explicit nil, the zero value for LanNics will be returned +// If the value is explicit nil, nil is returned func (o *LanEntities) GetNics() *LanNics { if o == nil { return nil @@ -80,6 +80,7 @@ func (o LanEntities) MarshalJSON() ([]byte, error) { if o.Nics != nil { toSerialize["nics"] = o.Nics } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_nics.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_nics.go index f348c1458..06cc683fe 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_nics.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_nics.go @@ -16,19 +16,19 @@ import ( // LanNics struct for LanNics type LanNics struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Nic `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewLanNics instantiates a new LanNics object @@ -49,114 +49,114 @@ func NewLanNicsWithDefaults() *LanNics { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LanNics) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *LanNics) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanNics) GetIdOk() (*string, bool) { +func (o *LanNics) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *LanNics) SetId(v string) { +// SetLinks sets field value +func (o *LanNics) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *LanNics) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *LanNics) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *LanNics) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *LanNics) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanNics) GetTypeOk() (*Type, bool) { +func (o *LanNics) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *LanNics) SetType(v Type) { +// SetHref sets field value +func (o *LanNics) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *LanNics) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *LanNics) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LanNics) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *LanNics) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanNics) GetHrefOk() (*string, bool) { +func (o *LanNics) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *LanNics) SetHref(v string) { +// SetId sets field value +func (o *LanNics) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *LanNics) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *LanNics) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *LanNics) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Nic will be returned +// If the value is explicit nil, nil is returned func (o *LanNics) GetItems() *[]Nic { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *LanNics) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *LanNics) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *LanNics) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanNics) GetOffsetOk() (*float32, bool) { +func (o *LanNics) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *LanNics) SetOffset(v float32) { +// SetLimit sets field value +func (o *LanNics) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *LanNics) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *LanNics) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *LanNics) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *LanNics) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanNics) GetLimitOk() (*float32, bool) { +func (o *LanNics) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *LanNics) SetLimit(v float32) { +// SetOffset sets field value +func (o *LanNics) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *LanNics) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *LanNics) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *LanNics) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *LanNics) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanNics) GetLinksOk() (*PaginationLinks, bool) { +func (o *LanNics) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *LanNics) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *LanNics) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *LanNics) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *LanNics) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *LanNics) HasLinks() bool { func (o LanNics) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_post.go new file mode 100644 index 000000000..ce97f15f0 --- /dev/null +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_post.go @@ -0,0 +1,341 @@ +/* + * CLOUD API + * + * IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. + * + * API version: 6.0 + */ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package ionoscloud + +import ( + "encoding/json" +) + +// LanPost struct for LanPost +type LanPost struct { + Entities *LanEntities `json:"entities,omitempty"` + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` + Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` + Properties *LanPropertiesPost `json:"properties"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` +} + +// NewLanPost instantiates a new LanPost object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewLanPost(properties LanPropertiesPost) *LanPost { + this := LanPost{} + + this.Properties = &properties + + return &this +} + +// NewLanPostWithDefaults instantiates a new LanPost object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewLanPostWithDefaults() *LanPost { + this := LanPost{} + return &this +} + +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *LanPost) GetEntities() *LanEntities { + if o == nil { + return nil + } + + return o.Entities + +} + +// GetEntitiesOk returns a tuple with the Entities field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPost) GetEntitiesOk() (*LanEntities, bool) { + if o == nil { + return nil, false + } + + return o.Entities, true +} + +// SetEntities sets field value +func (o *LanPost) SetEntities(v LanEntities) { + + o.Entities = &v + +} + +// HasEntities returns a boolean if a field has been set. +func (o *LanPost) HasEntities() bool { + if o != nil && o.Entities != nil { + return true + } + + return false +} + +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *LanPost) GetHref() *string { + if o == nil { + return nil + } + + return o.Href + +} + +// GetHrefOk returns a tuple with the Href field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPost) GetHrefOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Href, true +} + +// SetHref sets field value +func (o *LanPost) SetHref(v string) { + + o.Href = &v + +} + +// HasHref returns a boolean if a field has been set. +func (o *LanPost) HasHref() bool { + if o != nil && o.Href != nil { + return true + } + + return false +} + +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *LanPost) GetId() *string { + if o == nil { + return nil + } + + return o.Id + +} + +// GetIdOk returns a tuple with the Id field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPost) GetIdOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Id, true +} + +// SetId sets field value +func (o *LanPost) SetId(v string) { + + o.Id = &v + +} + +// HasId returns a boolean if a field has been set. +func (o *LanPost) HasId() bool { + if o != nil && o.Id != nil { + return true + } + + return false +} + +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *LanPost) GetMetadata() *DatacenterElementMetadata { + if o == nil { + return nil + } + + return o.Metadata + +} + +// GetMetadataOk returns a tuple with the Metadata field value +// and a boolean to check if the value has been set. 
+// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPost) GetMetadataOk() (*DatacenterElementMetadata, bool) { + if o == nil { + return nil, false + } + + return o.Metadata, true +} + +// SetMetadata sets field value +func (o *LanPost) SetMetadata(v DatacenterElementMetadata) { + + o.Metadata = &v + +} + +// HasMetadata returns a boolean if a field has been set. +func (o *LanPost) HasMetadata() bool { + if o != nil && o.Metadata != nil { + return true + } + + return false +} + +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *LanPost) GetProperties() *LanPropertiesPost { + if o == nil { + return nil + } + + return o.Properties + +} + +// GetPropertiesOk returns a tuple with the Properties field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPost) GetPropertiesOk() (*LanPropertiesPost, bool) { + if o == nil { + return nil, false + } + + return o.Properties, true +} + +// SetProperties sets field value +func (o *LanPost) SetProperties(v LanPropertiesPost) { + + o.Properties = &v + +} + +// HasProperties returns a boolean if a field has been set. +func (o *LanPost) HasProperties() bool { + if o != nil && o.Properties != nil { + return true + } + + return false +} + +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *LanPost) GetType() *Type { + if o == nil { + return nil + } + + return o.Type + +} + +// GetTypeOk returns a tuple with the Type field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPost) GetTypeOk() (*Type, bool) { + if o == nil { + return nil, false + } + + return o.Type, true +} + +// SetType sets field value +func (o *LanPost) SetType(v Type) { + + o.Type = &v + +} + +// HasType returns a boolean if a field has been set. 
+func (o *LanPost) HasType() bool { + if o != nil && o.Type != nil { + return true + } + + return false +} + +func (o LanPost) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if o.Entities != nil { + toSerialize["entities"] = o.Entities + } + + if o.Href != nil { + toSerialize["href"] = o.Href + } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + + if o.Metadata != nil { + toSerialize["metadata"] = o.Metadata + } + + if o.Properties != nil { + toSerialize["properties"] = o.Properties + } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + + return json.Marshal(toSerialize) +} + +type NullableLanPost struct { + value *LanPost + isSet bool +} + +func (v NullableLanPost) Get() *LanPost { + return v.value +} + +func (v *NullableLanPost) Set(val *LanPost) { + v.value = val + v.isSet = true +} + +func (v NullableLanPost) IsSet() bool { + return v.isSet +} + +func (v *NullableLanPost) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableLanPost(val *LanPost) *NullableLanPost { + return &NullableLanPost{value: val, isSet: true} +} + +func (v NullableLanPost) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableLanPost) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties.go index 763ced62b..a50a06938 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties.go @@ -16,10 +16,13 @@ import ( // LanProperties struct for LanProperties type LanProperties struct { - // The name of the resource. - Name *string `json:"name,omitempty"` // IP failover configurations for lan IpFailover *[]IPFailover `json:"ipFailover,omitempty"` + // [The IPv6 feature is in beta phase and not ready for production usage.] For a GET request, this value is either 'null' or contains the LAN's /64 IPv6 CIDR block if this LAN is IPv6 enabled. For POST/PUT/PATCH requests, 'AUTO' will result in enabling this LAN for IPv6 and automatically assign a /64 IPv6 CIDR block to this LAN and /80 IPv6 CIDR blocks to the NICs and one /128 IPv6 address to each connected NIC. If you choose the IPv6 CIDR block for the LAN on your own, then you must provide a /64 block, which is inside the IPv6 CIDR block of the virtual datacenter and unique inside all LANs from this virtual datacenter. If you enable IPv6 on a LAN with NICs, those NICs will get a /80 IPv6 CIDR block and one IPv6 address assigned to each automatically, unless you specify them explicitly on the LAN and on the NICs. A virtual data center is limited to a maximum of 256 IPv6-enabled LANs. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetIpv6CidrBlockNil` + Ipv6CidrBlock *string `json:"ipv6CidrBlock,omitempty"` + // The name of the resource. + Name *string `json:"name,omitempty"` // The unique identifier of the private Cross-Connect the LAN is connected to, if any. Pcc *string `json:"pcc,omitempty"` // This LAN faces the public Internet. 
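For context on the struct hunk above: the new ipv6CidrBlock field distinguishes an unset value (plain nil pointer, key omitted thanks to omitempty) from an explicit JSON null (pointer aimed at the package-level Nilstring sentinel, set via SetIpv6CidrBlockNil). A minimal usage sketch, assuming only the generated ionoscloud identifiers visible in this diff; the LAN name is a placeholder:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	ionoscloud "github.com/ionos-cloud/sdk-go/v6"
    )

    func main() {
    	props := ionoscloud.LanProperties{}
    	props.SetName("example-lan")   // placeholder name, for illustration only
    	props.SetIpv6CidrBlock("AUTO") // per the field's doc comment, "AUTO" asks the API to assign a /64 block

    	out, _ := json.Marshal(props)
    	fmt.Println(string(out)) // includes "ipv6CidrBlock":"AUTO" and "name":"example-lan"

    	props.SetIpv6CidrBlockNil() // point the field at the Nilstring sentinel
    	out, _ = json.Marshal(props)
    	fmt.Println(string(out)) // now emits "ipv6CidrBlock":null instead of omitting the key
    }

The sentinel address lets the generated MarshalJSON keep omit-when-unset behaviour for nil pointers while still letting callers send an explicit null to clear the CIDR block, as the `o.Ipv6CidrBlock == &Nilstring` branch below shows.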
@@ -44,76 +47,119 @@ func NewLanPropertiesWithDefaults() *LanProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LanProperties) GetName() *string { +// GetIpFailover returns the IpFailover field value +// If the value is explicit nil, nil is returned +func (o *LanProperties) GetIpFailover() *[]IPFailover { if o == nil { return nil } - return o.Name + return o.IpFailover } -// GetNameOk returns a tuple with the Name field value +// GetIpFailoverOk returns a tuple with the IpFailover field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanProperties) GetNameOk() (*string, bool) { +func (o *LanProperties) GetIpFailoverOk() (*[]IPFailover, bool) { if o == nil { return nil, false } - return o.Name, true + return o.IpFailover, true } -// SetName sets field value -func (o *LanProperties) SetName(v string) { +// SetIpFailover sets field value +func (o *LanProperties) SetIpFailover(v []IPFailover) { - o.Name = &v + o.IpFailover = &v } -// HasName returns a boolean if a field has been set. -func (o *LanProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasIpFailover returns a boolean if a field has been set. +func (o *LanProperties) HasIpFailover() bool { + if o != nil && o.IpFailover != nil { return true } return false } -// GetIpFailover returns the IpFailover field value -// If the value is explicit nil, the zero value for []IPFailover will be returned -func (o *LanProperties) GetIpFailover() *[]IPFailover { +// GetIpv6CidrBlock returns the Ipv6CidrBlock field value +// If the value is explicit nil, nil is returned +func (o *LanProperties) GetIpv6CidrBlock() *string { if o == nil { return nil } - return o.IpFailover + return o.Ipv6CidrBlock } -// GetIpFailoverOk returns a tuple with the IpFailover field value +// GetIpv6CidrBlockOk returns a tuple with the Ipv6CidrBlock field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanProperties) GetIpFailoverOk() (*[]IPFailover, bool) { +func (o *LanProperties) GetIpv6CidrBlockOk() (*string, bool) { if o == nil { return nil, false } - return o.IpFailover, true + return o.Ipv6CidrBlock, true } -// SetIpFailover sets field value -func (o *LanProperties) SetIpFailover(v []IPFailover) { +// SetIpv6CidrBlock sets field value +func (o *LanProperties) SetIpv6CidrBlock(v string) { - o.IpFailover = &v + o.Ipv6CidrBlock = &v } -// HasIpFailover returns a boolean if a field has been set. -func (o *LanProperties) HasIpFailover() bool { - if o != nil && o.IpFailover != nil { +// sets Ipv6CidrBlock to the explicit address that will be encoded as nil when marshaled +func (o *LanProperties) SetIpv6CidrBlockNil() { + o.Ipv6CidrBlock = &Nilstring +} + +// HasIpv6CidrBlock returns a boolean if a field has been set. +func (o *LanProperties) HasIpv6CidrBlock() bool { + if o != nil && o.Ipv6CidrBlock != nil { + return true + } + + return false +} + +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *LanProperties) GetName() *string { + if o == nil { + return nil + } + + return o.Name + +} + +// GetNameOk returns a tuple with the Name field value +// and a boolean to check if the value has been set. 
+// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanProperties) GetNameOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Name, true +} + +// SetName sets field value +func (o *LanProperties) SetName(v string) { + + o.Name = &v + +} + +// HasName returns a boolean if a field has been set. +func (o *LanProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -121,7 +167,7 @@ func (o *LanProperties) HasIpFailover() bool { } // GetPcc returns the Pcc field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *LanProperties) GetPcc() *string { if o == nil { return nil @@ -159,7 +205,7 @@ func (o *LanProperties) HasPcc() bool { } // GetPublic returns the Public field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *LanProperties) GetPublic() *bool { if o == nil { return nil @@ -198,18 +244,27 @@ func (o *LanProperties) HasPublic() bool { func (o LanProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } if o.IpFailover != nil { toSerialize["ipFailover"] = o.IpFailover } + + if o.Ipv6CidrBlock == &Nilstring { + toSerialize["ipv6CidrBlock"] = nil + } else if o.Ipv6CidrBlock != nil { + toSerialize["ipv6CidrBlock"] = o.Ipv6CidrBlock + } + if o.Name != nil { + toSerialize["name"] = o.Name + } + if o.Pcc != nil { toSerialize["pcc"] = o.Pcc } + if o.Public != nil { toSerialize["public"] = o.Public } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties_post.go new file mode 100644 index 000000000..387effb4c --- /dev/null +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties_post.go @@ -0,0 +1,305 @@ +/* + * CLOUD API + * + * IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. + * + * API version: 6.0 + */ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package ionoscloud + +import ( + "encoding/json" +) + +// LanPropertiesPost struct for LanPropertiesPost +type LanPropertiesPost struct { + // IP failover configurations for lan + IpFailover *[]IPFailover `json:"ipFailover,omitempty"` + // [The IPv6 feature is in beta phase and not ready for production usage.] For a GET request, this value is either 'null' or contains the LAN's /64 IPv6 CIDR block if this LAN is IPv6-enabled. For POST/PUT/PATCH requests, 'AUTO' will result in enabling this LAN for IPv6 and automatically assign a /64 IPv6 CIDR block to this LAN. If you choose the IPv6 CIDR block on your own, then you must provide a /64 block, which is inside the IPv6 CIDR block of the virtual datacenter and unique inside all LANs from this virtual datacenter. If you enable IPv6 on a LAN with NICs, those NICs will get a /80 IPv6 CIDR block and one IPv6 address assigned to each automatically, unless you specify them explicitly on the NICs. 
A virtual data center is limited to a maximum of 256 IPv6-enabled LANs. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetIpv6CidrBlockNil` + Ipv6CidrBlock *string `json:"ipv6CidrBlock,omitempty"` + // The name of the resource. + Name *string `json:"name,omitempty"` + // The unique identifier of the private Cross-Connect the LAN is connected to, if any. + Pcc *string `json:"pcc,omitempty"` + // This LAN faces the public Internet. + Public *bool `json:"public,omitempty"` +} + +// NewLanPropertiesPost instantiates a new LanPropertiesPost object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewLanPropertiesPost() *LanPropertiesPost { + this := LanPropertiesPost{} + + return &this +} + +// NewLanPropertiesPostWithDefaults instantiates a new LanPropertiesPost object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewLanPropertiesPostWithDefaults() *LanPropertiesPost { + this := LanPropertiesPost{} + return &this +} + +// GetIpFailover returns the IpFailover field value +// If the value is explicit nil, nil is returned +func (o *LanPropertiesPost) GetIpFailover() *[]IPFailover { + if o == nil { + return nil + } + + return o.IpFailover + +} + +// GetIpFailoverOk returns a tuple with the IpFailover field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPropertiesPost) GetIpFailoverOk() (*[]IPFailover, bool) { + if o == nil { + return nil, false + } + + return o.IpFailover, true +} + +// SetIpFailover sets field value +func (o *LanPropertiesPost) SetIpFailover(v []IPFailover) { + + o.IpFailover = &v + +} + +// HasIpFailover returns a boolean if a field has been set. +func (o *LanPropertiesPost) HasIpFailover() bool { + if o != nil && o.IpFailover != nil { + return true + } + + return false +} + +// GetIpv6CidrBlock returns the Ipv6CidrBlock field value +// If the value is explicit nil, nil is returned +func (o *LanPropertiesPost) GetIpv6CidrBlock() *string { + if o == nil { + return nil + } + + return o.Ipv6CidrBlock + +} + +// GetIpv6CidrBlockOk returns a tuple with the Ipv6CidrBlock field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPropertiesPost) GetIpv6CidrBlockOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Ipv6CidrBlock, true +} + +// SetIpv6CidrBlock sets field value +func (o *LanPropertiesPost) SetIpv6CidrBlock(v string) { + + o.Ipv6CidrBlock = &v + +} + +// sets Ipv6CidrBlock to the explicit address that will be encoded as nil when marshaled +func (o *LanPropertiesPost) SetIpv6CidrBlockNil() { + o.Ipv6CidrBlock = &Nilstring +} + +// HasIpv6CidrBlock returns a boolean if a field has been set. 
+func (o *LanPropertiesPost) HasIpv6CidrBlock() bool { + if o != nil && o.Ipv6CidrBlock != nil { + return true + } + + return false +} + +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *LanPropertiesPost) GetName() *string { + if o == nil { + return nil + } + + return o.Name + +} + +// GetNameOk returns a tuple with the Name field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPropertiesPost) GetNameOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Name, true +} + +// SetName sets field value +func (o *LanPropertiesPost) SetName(v string) { + + o.Name = &v + +} + +// HasName returns a boolean if a field has been set. +func (o *LanPropertiesPost) HasName() bool { + if o != nil && o.Name != nil { + return true + } + + return false +} + +// GetPcc returns the Pcc field value +// If the value is explicit nil, nil is returned +func (o *LanPropertiesPost) GetPcc() *string { + if o == nil { + return nil + } + + return o.Pcc + +} + +// GetPccOk returns a tuple with the Pcc field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPropertiesPost) GetPccOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Pcc, true +} + +// SetPcc sets field value +func (o *LanPropertiesPost) SetPcc(v string) { + + o.Pcc = &v + +} + +// HasPcc returns a boolean if a field has been set. +func (o *LanPropertiesPost) HasPcc() bool { + if o != nil && o.Pcc != nil { + return true + } + + return false +} + +// GetPublic returns the Public field value +// If the value is explicit nil, nil is returned +func (o *LanPropertiesPost) GetPublic() *bool { + if o == nil { + return nil + } + + return o.Public + +} + +// GetPublicOk returns a tuple with the Public field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *LanPropertiesPost) GetPublicOk() (*bool, bool) { + if o == nil { + return nil, false + } + + return o.Public, true +} + +// SetPublic sets field value +func (o *LanPropertiesPost) SetPublic(v bool) { + + o.Public = &v + +} + +// HasPublic returns a boolean if a field has been set. 
+func (o *LanPropertiesPost) HasPublic() bool { + if o != nil && o.Public != nil { + return true + } + + return false +} + +func (o LanPropertiesPost) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if o.IpFailover != nil { + toSerialize["ipFailover"] = o.IpFailover + } + + if o.Ipv6CidrBlock == &Nilstring { + toSerialize["ipv6CidrBlock"] = nil + } else if o.Ipv6CidrBlock != nil { + toSerialize["ipv6CidrBlock"] = o.Ipv6CidrBlock + } + if o.Name != nil { + toSerialize["name"] = o.Name + } + + if o.Pcc != nil { + toSerialize["pcc"] = o.Pcc + } + + if o.Public != nil { + toSerialize["public"] = o.Public + } + + return json.Marshal(toSerialize) +} + +type NullableLanPropertiesPost struct { + value *LanPropertiesPost + isSet bool +} + +func (v NullableLanPropertiesPost) Get() *LanPropertiesPost { + return v.value +} + +func (v *NullableLanPropertiesPost) Set(val *LanPropertiesPost) { + v.value = val + v.isSet = true +} + +func (v NullableLanPropertiesPost) IsSet() bool { + return v.isSet +} + +func (v *NullableLanPropertiesPost) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableLanPropertiesPost(val *LanPropertiesPost) *NullableLanPropertiesPost { + return &NullableLanPropertiesPost{value: val, isSet: true} +} + +func (v NullableLanPropertiesPost) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableLanPropertiesPost) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lans.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lans.go index 361b3809f..b0ee52ca2 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lans.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lans.go @@ -16,19 +16,19 @@ import ( // Lans struct for Lans type Lans struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Lan `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewLans instantiates a new Lans object @@ -49,114 +49,114 @@ func NewLansWithDefaults() *Lans { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Lans) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *Lans) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lans) GetIdOk() (*string, bool) { +func (o *Lans) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *Lans) SetId(v string) { +// SetLinks sets field value +func (o *Lans) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *Lans) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *Lans) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Lans) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Lans) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lans) GetTypeOk() (*Type, bool) { +func (o *Lans) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Lans) SetType(v Type) { +// SetHref sets field value +func (o *Lans) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Lans) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Lans) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Lans) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Lans) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lans) GetHrefOk() (*string, bool) { +func (o *Lans) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Lans) SetHref(v string) { +// SetId sets field value +func (o *Lans) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Lans) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *Lans) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *Lans) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Lan will be returned +// If the value is explicit nil, nil is returned func (o *Lans) GetItems() *[]Lan { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *Lans) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Lans) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *Lans) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lans) GetOffsetOk() (*float32, bool) { +func (o *Lans) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *Lans) SetOffset(v float32) { +// SetLimit sets field value +func (o *Lans) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *Lans) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *Lans) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Lans) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *Lans) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lans) GetLimitOk() (*float32, bool) { +func (o *Lans) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *Lans) SetLimit(v float32) { +// SetOffset sets field value +func (o *Lans) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *Lans) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *Lans) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *Lans) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Lans) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Lans) GetLinksOk() (*PaginationLinks, bool) { +func (o *Lans) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *Lans) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *Lans) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *Lans) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *Lans) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *Lans) HasLinks() bool { func (o Lans) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer.go index 822d7f1c3..064e5eb99 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer.go @@ -16,15 +16,15 @@ import ( // Loadbalancer struct for Loadbalancer type Loadbalancer struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Entities *LoadbalancerEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *LoadbalancerProperties `json:"properties"` - Entities *LoadbalancerEntities `json:"entities,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewLoadbalancer instantiates a new Loadbalancer object @@ -47,114 +47,114 @@ func NewLoadbalancerWithDefaults() *Loadbalancer { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Loadbalancer) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancer) GetEntities() *LoadbalancerEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancer) GetIdOk() (*string, bool) { +func (o *Loadbalancer) GetEntitiesOk() (*LoadbalancerEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *Loadbalancer) SetId(v string) { +// SetEntities sets field value +func (o *Loadbalancer) SetEntities(v LoadbalancerEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *Loadbalancer) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *Loadbalancer) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Loadbalancer) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancer) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancer) GetTypeOk() (*Type, bool) { +func (o *Loadbalancer) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Loadbalancer) SetType(v Type) { +// SetHref sets field value +func (o *Loadbalancer) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Loadbalancer) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Loadbalancer) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Loadbalancer) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancer) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancer) GetHrefOk() (*string, bool) { +func (o *Loadbalancer) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Loadbalancer) SetHref(v string) { +// SetId sets field value +func (o *Loadbalancer) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Loadbalancer) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *Loadbalancer) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *Loadbalancer) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *Loadbalancer) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *Loadbalancer) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for LoadbalancerProperties will be returned +// If the value is explicit nil, nil is returned func (o *Loadbalancer) GetProperties() *LoadbalancerProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *Loadbalancer) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for LoadbalancerEntities will be returned -func (o *Loadbalancer) GetEntities() *LoadbalancerEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancer) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancer) GetEntitiesOk() (*LoadbalancerEntities, bool) { +func (o *Loadbalancer) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *Loadbalancer) SetEntities(v LoadbalancerEntities) { +// SetType sets field value +func (o *Loadbalancer) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *Loadbalancer) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Loadbalancer) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *Loadbalancer) HasEntities() bool { func (o Loadbalancer) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer_entities.go index 129844dcb..8a4c5049c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer_entities.go @@ -38,7 +38,7 @@ func NewLoadbalancerEntitiesWithDefaults() *LoadbalancerEntities { } // GetBalancednics returns the Balancednics field value -// If the value is explicit nil, the zero value for BalancedNics will be returned +// If the value is explicit nil, nil is returned func (o *LoadbalancerEntities) GetBalancednics() *BalancedNics { if o == nil { return nil @@ -80,6 +80,7 @@ func (o LoadbalancerEntities) MarshalJSON() ([]byte, error) { if o.Balancednics != nil { toSerialize["balancednics"] = o.Balancednics } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer_properties.go index e6b9b02fb..9d1f3cee7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancer_properties.go @@ -16,12 +16,13 @@ import ( // LoadbalancerProperties struct for LoadbalancerProperties type LoadbalancerProperties struct { - // The name of the resource. - Name *string `json:"name,omitempty"` - // IPv4 address of the loadbalancer. All attached NICs will inherit this IP. Leaving value null will assign IP automatically. - Ip *string `json:"ip,omitempty"` // Indicates if the loadbalancer will reserve an IP using DHCP. Dhcp *bool `json:"dhcp,omitempty"` + // IPv4 address of the loadbalancer. All attached NICs will inherit this IP. Leaving value null will assign IP automatically. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetIpNil` + Ip *string `json:"ip,omitempty"` + // The name of the resource. 
+ Name *string `json:"name,omitempty"` } // NewLoadbalancerProperties instantiates a new LoadbalancerProperties object @@ -42,38 +43,38 @@ func NewLoadbalancerPropertiesWithDefaults() *LoadbalancerProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LoadbalancerProperties) GetName() *string { +// GetDhcp returns the Dhcp field value +// If the value is explicit nil, nil is returned +func (o *LoadbalancerProperties) GetDhcp() *bool { if o == nil { return nil } - return o.Name + return o.Dhcp } -// GetNameOk returns a tuple with the Name field value +// GetDhcpOk returns a tuple with the Dhcp field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LoadbalancerProperties) GetNameOk() (*string, bool) { +func (o *LoadbalancerProperties) GetDhcpOk() (*bool, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Dhcp, true } -// SetName sets field value -func (o *LoadbalancerProperties) SetName(v string) { +// SetDhcp sets field value +func (o *LoadbalancerProperties) SetDhcp(v bool) { - o.Name = &v + o.Dhcp = &v } -// HasName returns a boolean if a field has been set. -func (o *LoadbalancerProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasDhcp returns a boolean if a field has been set. +func (o *LoadbalancerProperties) HasDhcp() bool { + if o != nil && o.Dhcp != nil { return true } @@ -81,7 +82,7 @@ func (o *LoadbalancerProperties) HasName() bool { } // GetIp returns the Ip field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *LoadbalancerProperties) GetIp() *string { if o == nil { return nil @@ -109,6 +110,11 @@ func (o *LoadbalancerProperties) SetIp(v string) { } +// sets Ip to the explicit address that will be encoded as nil when marshaled +func (o *LoadbalancerProperties) SetIpNil() { + o.Ip = &Nilstring +} + // HasIp returns a boolean if a field has been set. func (o *LoadbalancerProperties) HasIp() bool { if o != nil && o.Ip != nil { @@ -118,38 +124,38 @@ func (o *LoadbalancerProperties) HasIp() bool { return false } -// GetDhcp returns the Dhcp field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *LoadbalancerProperties) GetDhcp() *bool { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *LoadbalancerProperties) GetName() *string { if o == nil { return nil } - return o.Dhcp + return o.Name } -// GetDhcpOk returns a tuple with the Dhcp field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LoadbalancerProperties) GetDhcpOk() (*bool, bool) { +func (o *LoadbalancerProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Dhcp, true + return o.Name, true } -// SetDhcp sets field value -func (o *LoadbalancerProperties) SetDhcp(v bool) { +// SetName sets field value +func (o *LoadbalancerProperties) SetName(v string) { - o.Dhcp = &v + o.Name = &v } -// HasDhcp returns a boolean if a field has been set. -func (o *LoadbalancerProperties) HasDhcp() bool { - if o != nil && o.Dhcp != nil { +// HasName returns a boolean if a field has been set. 
+func (o *LoadbalancerProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -158,13 +164,19 @@ func (o *LoadbalancerProperties) HasDhcp() bool { func (o LoadbalancerProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } - toSerialize["ip"] = o.Ip if o.Dhcp != nil { toSerialize["dhcp"] = o.Dhcp } + + if o.Ip == &Nilstring { + toSerialize["ip"] = nil + } else if o.Ip != nil { + toSerialize["ip"] = o.Ip + } + if o.Name != nil { + toSerialize["name"] = o.Name + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancers.go index 166560f99..267eba36f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancers.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_loadbalancers.go @@ -16,19 +16,19 @@ import ( // Loadbalancers struct for Loadbalancers type Loadbalancers struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Loadbalancer `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewLoadbalancers instantiates a new Loadbalancers object @@ -49,114 +49,114 @@ func NewLoadbalancersWithDefaults() *Loadbalancers { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Loadbalancers) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancers) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancers) GetIdOk() (*string, bool) { +func (o *Loadbalancers) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *Loadbalancers) SetId(v string) { +// SetLinks sets field value +func (o *Loadbalancers) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *Loadbalancers) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. 
+func (o *Loadbalancers) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Loadbalancers) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancers) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancers) GetTypeOk() (*Type, bool) { +func (o *Loadbalancers) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Loadbalancers) SetType(v Type) { +// SetHref sets field value +func (o *Loadbalancers) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Loadbalancers) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Loadbalancers) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Loadbalancers) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancers) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancers) GetHrefOk() (*string, bool) { +func (o *Loadbalancers) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Loadbalancers) SetHref(v string) { +// SetId sets field value +func (o *Loadbalancers) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Loadbalancers) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Loadbalancers) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *Loadbalancers) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Loadbalancer will be returned +// If the value is explicit nil, nil is returned func (o *Loadbalancers) GetItems() *[]Loadbalancer { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *Loadbalancers) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Loadbalancers) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancers) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancers) GetOffsetOk() (*float32, bool) { +func (o *Loadbalancers) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *Loadbalancers) SetOffset(v float32) { +// SetLimit sets field value +func (o *Loadbalancers) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *Loadbalancers) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *Loadbalancers) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Loadbalancers) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancers) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancers) GetLimitOk() (*float32, bool) { +func (o *Loadbalancers) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *Loadbalancers) SetLimit(v float32) { +// SetOffset sets field value +func (o *Loadbalancers) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *Loadbalancers) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *Loadbalancers) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *Loadbalancers) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Loadbalancers) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Loadbalancers) GetLinksOk() (*PaginationLinks, bool) { +func (o *Loadbalancers) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *Loadbalancers) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *Loadbalancers) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *Loadbalancers) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Loadbalancers) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *Loadbalancers) HasLinks() bool { func (o Loadbalancers) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_location.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_location.go index 59a250d45..379723ff1 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_location.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_location.go @@ -16,14 +16,14 @@ import ( // Location struct for Location type Location struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *LocationProperties `json:"properties"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewLocation instantiates a new Location object @@ -46,190 +46,190 @@ func NewLocationWithDefaults() *Location { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Location) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Location) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Location) GetIdOk() (*string, bool) { +func (o *Location) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Location) SetId(v string) { +// SetHref sets field value +func (o *Location) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Location) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *Location) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Location) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Location) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Location) GetTypeOk() (*Type, bool) { +func (o *Location) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Location) SetType(v Type) { +// SetId sets field value +func (o *Location) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Location) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Location) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Location) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *Location) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Location) GetHrefOk() (*string, bool) { +func (o *Location) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *Location) SetHref(v string) { +// SetMetadata sets field value +func (o *Location) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *Location) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *Location) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *Location) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *Location) GetProperties() *LocationProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Location) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *Location) GetPropertiesOk() (*LocationProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *Location) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *Location) SetProperties(v LocationProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *Location) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *Location) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for LocationProperties will be returned -func (o *Location) GetProperties() *LocationProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Location) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Location) GetPropertiesOk() (*LocationProperties, bool) { +func (o *Location) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *Location) SetProperties(v LocationProperties) { +// SetType sets field value +func (o *Location) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *Location) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *Location) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *Location) HasProperties() bool { func (o Location) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_location_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_location_properties.go index 9e8e623b6..95da4abb3 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_location_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_location_properties.go @@ -16,14 +16,14 @@ import ( // LocationProperties struct for LocationProperties type LocationProperties struct { - // The location name. - Name *string `json:"name,omitempty"` + // A list of available CPU types and related resources available in the location. + CpuArchitecture *[]CpuArchitectureProperties `json:"cpuArchitecture,omitempty"` // A list of available features in the location. 
Features *[]string `json:"features,omitempty"` // A list of image aliases available in the location. ImageAliases *[]string `json:"imageAliases,omitempty"` - // A list of available CPU types and related resources available in the location. - CpuArchitecture *[]CpuArchitectureProperties `json:"cpuArchitecture,omitempty"` + // The location name. + Name *string `json:"name,omitempty"` } // NewLocationProperties instantiates a new LocationProperties object @@ -44,38 +44,38 @@ func NewLocationPropertiesWithDefaults() *LocationProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LocationProperties) GetName() *string { +// GetCpuArchitecture returns the CpuArchitecture field value +// If the value is explicit nil, nil is returned +func (o *LocationProperties) GetCpuArchitecture() *[]CpuArchitectureProperties { if o == nil { return nil } - return o.Name + return o.CpuArchitecture } -// GetNameOk returns a tuple with the Name field value +// GetCpuArchitectureOk returns a tuple with the CpuArchitecture field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LocationProperties) GetNameOk() (*string, bool) { +func (o *LocationProperties) GetCpuArchitectureOk() (*[]CpuArchitectureProperties, bool) { if o == nil { return nil, false } - return o.Name, true + return o.CpuArchitecture, true } -// SetName sets field value -func (o *LocationProperties) SetName(v string) { +// SetCpuArchitecture sets field value +func (o *LocationProperties) SetCpuArchitecture(v []CpuArchitectureProperties) { - o.Name = &v + o.CpuArchitecture = &v } -// HasName returns a boolean if a field has been set. -func (o *LocationProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasCpuArchitecture returns a boolean if a field has been set. +func (o *LocationProperties) HasCpuArchitecture() bool { + if o != nil && o.CpuArchitecture != nil { return true } @@ -83,7 +83,7 @@ func (o *LocationProperties) HasName() bool { } // GetFeatures returns the Features field value -// If the value is explicit nil, the zero value for []string will be returned +// If the value is explicit nil, nil is returned func (o *LocationProperties) GetFeatures() *[]string { if o == nil { return nil @@ -121,7 +121,7 @@ func (o *LocationProperties) HasFeatures() bool { } // GetImageAliases returns the ImageAliases field value -// If the value is explicit nil, the zero value for []string will be returned +// If the value is explicit nil, nil is returned func (o *LocationProperties) GetImageAliases() *[]string { if o == nil { return nil @@ -158,38 +158,38 @@ func (o *LocationProperties) HasImageAliases() bool { return false } -// GetCpuArchitecture returns the CpuArchitecture field value -// If the value is explicit nil, the zero value for []CpuArchitectureProperties will be returned -func (o *LocationProperties) GetCpuArchitecture() *[]CpuArchitectureProperties { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *LocationProperties) GetName() *string { if o == nil { return nil } - return o.CpuArchitecture + return o.Name } -// GetCpuArchitectureOk returns a tuple with the CpuArchitecture field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LocationProperties) GetCpuArchitectureOk() (*[]CpuArchitectureProperties, bool) { +func (o *LocationProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.CpuArchitecture, true + return o.Name, true } -// SetCpuArchitecture sets field value -func (o *LocationProperties) SetCpuArchitecture(v []CpuArchitectureProperties) { +// SetName sets field value +func (o *LocationProperties) SetName(v string) { - o.CpuArchitecture = &v + o.Name = &v } -// HasCpuArchitecture returns a boolean if a field has been set. -func (o *LocationProperties) HasCpuArchitecture() bool { - if o != nil && o.CpuArchitecture != nil { +// HasName returns a boolean if a field has been set. +func (o *LocationProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -198,18 +198,22 @@ func (o *LocationProperties) HasCpuArchitecture() bool { func (o LocationProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.CpuArchitecture != nil { + toSerialize["cpuArchitecture"] = o.CpuArchitecture } + if o.Features != nil { toSerialize["features"] = o.Features } + if o.ImageAliases != nil { toSerialize["imageAliases"] = o.ImageAliases } - if o.CpuArchitecture != nil { - toSerialize["cpuArchitecture"] = o.CpuArchitecture + + if o.Name != nil { + toSerialize["name"] = o.Name } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_locations.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_locations.go index ba9354ace..d3311051b 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_locations.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_locations.go @@ -16,14 +16,14 @@ import ( // Locations struct for Locations type Locations struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Location `json:"items,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewLocations instantiates a new Locations object @@ -44,152 +44,152 @@ func NewLocationsWithDefaults() *Locations { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Locations) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Locations) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Locations) GetIdOk() (*string, bool) { +func (o *Locations) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Locations) SetId(v string) { +// SetHref sets field value +func (o *Locations) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. 
-func (o *Locations) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Locations) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Locations) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Locations) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Locations) GetTypeOk() (*Type, bool) { +func (o *Locations) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Locations) SetType(v Type) { +// SetId sets field value +func (o *Locations) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Locations) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Locations) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Locations) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *Locations) GetItems() *[]Location { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Locations) GetHrefOk() (*string, bool) { +func (o *Locations) GetItemsOk() (*[]Location, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *Locations) SetHref(v string) { +// SetItems sets field value +func (o *Locations) SetItems(v []Location) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *Locations) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *Locations) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Location will be returned -func (o *Locations) GetItems() *[]Location { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Locations) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Locations) GetItemsOk() (*[]Location, bool) { +func (o *Locations) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *Locations) SetItems(v []Location) { +// SetType sets field value +func (o *Locations) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *Locations) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *Locations) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *Locations) HasItems() bool { func (o Locations) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway.go index 70f8e92a9..2feaa609f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway.go @@ -16,15 +16,15 @@ import ( // NatGateway struct for NatGateway type NatGateway struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Entities *NatGatewayEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *NatGatewayProperties `json:"properties"` - Entities *NatGatewayEntities `json:"entities,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewNatGateway instantiates a new NatGateway object @@ -47,114 +47,114 @@ func NewNatGatewayWithDefaults() *NatGateway { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGateway) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *NatGateway) GetEntities() *NatGatewayEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateway) GetIdOk() (*string, bool) { +func (o *NatGateway) GetEntitiesOk() (*NatGatewayEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *NatGateway) SetId(v string) { +// SetEntities sets field value +func (o *NatGateway) SetEntities(v NatGatewayEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. 
-func (o *NatGateway) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *NatGateway) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NatGateway) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NatGateway) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateway) GetTypeOk() (*Type, bool) { +func (o *NatGateway) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *NatGateway) SetType(v Type) { +// SetHref sets field value +func (o *NatGateway) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *NatGateway) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *NatGateway) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGateway) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NatGateway) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateway) GetHrefOk() (*string, bool) { +func (o *NatGateway) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *NatGateway) SetHref(v string) { +// SetId sets field value +func (o *NatGateway) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *NatGateway) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *NatGateway) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *NatGateway) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *NatGateway) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *NatGateway) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NatGatewayProperties will be returned +// If the value is explicit nil, nil is returned func (o *NatGateway) GetProperties() *NatGatewayProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *NatGateway) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for NatGatewayEntities will be returned -func (o *NatGateway) GetEntities() *NatGatewayEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NatGateway) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateway) GetEntitiesOk() (*NatGatewayEntities, bool) { +func (o *NatGateway) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *NatGateway) SetEntities(v NatGatewayEntities) { +// SetType sets field value +func (o *NatGateway) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *NatGateway) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *NatGateway) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *NatGateway) HasEntities() bool { func (o NatGateway) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_entities.go index d51cb9543..94fae49c4 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_entities.go @@ -16,8 +16,8 @@ import ( // NatGatewayEntities struct for NatGatewayEntities type NatGatewayEntities struct { - Rules *NatGatewayRules `json:"rules,omitempty"` Flowlogs *FlowLogs `json:"flowlogs,omitempty"` + Rules *NatGatewayRules `json:"rules,omitempty"` } // NewNatGatewayEntities instantiates a new NatGatewayEntities object @@ -38,76 +38,76 @@ func NewNatGatewayEntitiesWithDefaults() *NatGatewayEntities { return &this } -// GetRules returns the Rules field value -// If the value is explicit nil, the zero value for NatGatewayRules will be returned -func (o *NatGatewayEntities) GetRules() *NatGatewayRules { +// GetFlowlogs returns the Flowlogs field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayEntities) GetFlowlogs() *FlowLogs { if o == nil { return nil } - return o.Rules + return o.Flowlogs } -// GetRulesOk returns a tuple with the Rules field value +// GetFlowlogsOk returns a tuple with the Flowlogs field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayEntities) GetRulesOk() (*NatGatewayRules, bool) { +func (o *NatGatewayEntities) GetFlowlogsOk() (*FlowLogs, bool) { if o == nil { return nil, false } - return o.Rules, true + return o.Flowlogs, true } -// SetRules sets field value -func (o *NatGatewayEntities) SetRules(v NatGatewayRules) { +// SetFlowlogs sets field value +func (o *NatGatewayEntities) SetFlowlogs(v FlowLogs) { - o.Rules = &v + o.Flowlogs = &v } -// HasRules returns a boolean if a field has been set. -func (o *NatGatewayEntities) HasRules() bool { - if o != nil && o.Rules != nil { +// HasFlowlogs returns a boolean if a field has been set. 
+func (o *NatGatewayEntities) HasFlowlogs() bool { + if o != nil && o.Flowlogs != nil { return true } return false } -// GetFlowlogs returns the Flowlogs field value -// If the value is explicit nil, the zero value for FlowLogs will be returned -func (o *NatGatewayEntities) GetFlowlogs() *FlowLogs { +// GetRules returns the Rules field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayEntities) GetRules() *NatGatewayRules { if o == nil { return nil } - return o.Flowlogs + return o.Rules } -// GetFlowlogsOk returns a tuple with the Flowlogs field value +// GetRulesOk returns a tuple with the Rules field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayEntities) GetFlowlogsOk() (*FlowLogs, bool) { +func (o *NatGatewayEntities) GetRulesOk() (*NatGatewayRules, bool) { if o == nil { return nil, false } - return o.Flowlogs, true + return o.Rules, true } -// SetFlowlogs sets field value -func (o *NatGatewayEntities) SetFlowlogs(v FlowLogs) { +// SetRules sets field value +func (o *NatGatewayEntities) SetRules(v NatGatewayRules) { - o.Flowlogs = &v + o.Rules = &v } -// HasFlowlogs returns a boolean if a field has been set. -func (o *NatGatewayEntities) HasFlowlogs() bool { - if o != nil && o.Flowlogs != nil { +// HasRules returns a boolean if a field has been set. +func (o *NatGatewayEntities) HasRules() bool { + if o != nil && o.Rules != nil { return true } @@ -116,12 +116,14 @@ func (o *NatGatewayEntities) HasFlowlogs() bool { func (o NatGatewayEntities) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Rules != nil { - toSerialize["rules"] = o.Rules - } if o.Flowlogs != nil { toSerialize["flowlogs"] = o.Flowlogs } + + if o.Rules != nil { + toSerialize["rules"] = o.Rules + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_lan_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_lan_properties.go index 007a57d69..068b141be 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_lan_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_lan_properties.go @@ -16,10 +16,10 @@ import ( // NatGatewayLanProperties struct for NatGatewayLanProperties type NatGatewayLanProperties struct { - // Id for the LAN connected to the NAT Gateway - Id *int32 `json:"id"` // Collection of gateway IP addresses of the NAT Gateway. Will be auto-generated if not provided. Should ideally be an IP belonging to the same subnet as the LAN GatewayIps *[]string `json:"gatewayIps,omitempty"` + // Id for the LAN connected to the NAT Gateway + Id *int32 `json:"id"` } // NewNatGatewayLanProperties instantiates a new NatGatewayLanProperties object @@ -42,76 +42,76 @@ func NewNatGatewayLanPropertiesWithDefaults() *NatGatewayLanProperties { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *NatGatewayLanProperties) GetId() *int32 { +// GetGatewayIps returns the GatewayIps field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayLanProperties) GetGatewayIps() *[]string { if o == nil { return nil } - return o.Id + return o.GatewayIps } -// GetIdOk returns a tuple with the Id field value +// GetGatewayIpsOk returns a tuple with the GatewayIps field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayLanProperties) GetIdOk() (*int32, bool) { +func (o *NatGatewayLanProperties) GetGatewayIpsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.GatewayIps, true } -// SetId sets field value -func (o *NatGatewayLanProperties) SetId(v int32) { +// SetGatewayIps sets field value +func (o *NatGatewayLanProperties) SetGatewayIps(v []string) { - o.Id = &v + o.GatewayIps = &v } -// HasId returns a boolean if a field has been set. -func (o *NatGatewayLanProperties) HasId() bool { - if o != nil && o.Id != nil { +// HasGatewayIps returns a boolean if a field has been set. +func (o *NatGatewayLanProperties) HasGatewayIps() bool { + if o != nil && o.GatewayIps != nil { return true } return false } -// GetGatewayIps returns the GatewayIps field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *NatGatewayLanProperties) GetGatewayIps() *[]string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayLanProperties) GetId() *int32 { if o == nil { return nil } - return o.GatewayIps + return o.Id } -// GetGatewayIpsOk returns a tuple with the GatewayIps field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayLanProperties) GetGatewayIpsOk() (*[]string, bool) { +func (o *NatGatewayLanProperties) GetIdOk() (*int32, bool) { if o == nil { return nil, false } - return o.GatewayIps, true + return o.Id, true } -// SetGatewayIps sets field value -func (o *NatGatewayLanProperties) SetGatewayIps(v []string) { +// SetId sets field value +func (o *NatGatewayLanProperties) SetId(v int32) { - o.GatewayIps = &v + o.Id = &v } -// HasGatewayIps returns a boolean if a field has been set. -func (o *NatGatewayLanProperties) HasGatewayIps() bool { - if o != nil && o.GatewayIps != nil { +// HasId returns a boolean if a field has been set. +func (o *NatGatewayLanProperties) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -120,12 +120,14 @@ func (o *NatGatewayLanProperties) HasGatewayIps() bool { func (o NatGatewayLanProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } if o.GatewayIps != nil { toSerialize["gatewayIps"] = o.GatewayIps } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_properties.go index 6eea997ef..ad784ad9f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_properties.go @@ -16,12 +16,12 @@ import ( // NatGatewayProperties struct for NatGatewayProperties type NatGatewayProperties struct { + // Collection of LANs connected to the NAT Gateway. IPs must contain a valid subnet mask. If no IP is provided, the system will generate an IP with /24 subnet. + Lans *[]NatGatewayLanProperties `json:"lans,omitempty"` // Name of the NAT Gateway. Name *string `json:"name"` // Collection of public IP addresses of the NAT Gateway. Should be customer reserved IP addresses in that location. PublicIps *[]string `json:"publicIps"` - // Collection of LANs connected to the NAT Gateway. 
IPs must contain a valid subnet mask. If no IP is provided, the system will generate an IP with /24 subnet. - Lans *[]NatGatewayLanProperties `json:"lans,omitempty"` } // NewNatGatewayProperties instantiates a new NatGatewayProperties object @@ -45,114 +45,114 @@ func NewNatGatewayPropertiesWithDefaults() *NatGatewayProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayProperties) GetName() *string { +// GetLans returns the Lans field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayProperties) GetLans() *[]NatGatewayLanProperties { if o == nil { return nil } - return o.Name + return o.Lans } -// GetNameOk returns a tuple with the Name field value +// GetLansOk returns a tuple with the Lans field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayProperties) GetNameOk() (*string, bool) { +func (o *NatGatewayProperties) GetLansOk() (*[]NatGatewayLanProperties, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Lans, true } -// SetName sets field value -func (o *NatGatewayProperties) SetName(v string) { +// SetLans sets field value +func (o *NatGatewayProperties) SetLans(v []NatGatewayLanProperties) { - o.Name = &v + o.Lans = &v } -// HasName returns a boolean if a field has been set. -func (o *NatGatewayProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasLans returns a boolean if a field has been set. +func (o *NatGatewayProperties) HasLans() bool { + if o != nil && o.Lans != nil { return true } return false } -// GetPublicIps returns the PublicIps field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *NatGatewayProperties) GetPublicIps() *[]string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayProperties) GetName() *string { if o == nil { return nil } - return o.PublicIps + return o.Name } -// GetPublicIpsOk returns a tuple with the PublicIps field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayProperties) GetPublicIpsOk() (*[]string, bool) { +func (o *NatGatewayProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.PublicIps, true + return o.Name, true } -// SetPublicIps sets field value -func (o *NatGatewayProperties) SetPublicIps(v []string) { +// SetName sets field value +func (o *NatGatewayProperties) SetName(v string) { - o.PublicIps = &v + o.Name = &v } -// HasPublicIps returns a boolean if a field has been set. -func (o *NatGatewayProperties) HasPublicIps() bool { - if o != nil && o.PublicIps != nil { +// HasName returns a boolean if a field has been set. 
+func (o *NatGatewayProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetLans returns the Lans field value -// If the value is explicit nil, the zero value for []NatGatewayLanProperties will be returned -func (o *NatGatewayProperties) GetLans() *[]NatGatewayLanProperties { +// GetPublicIps returns the PublicIps field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayProperties) GetPublicIps() *[]string { if o == nil { return nil } - return o.Lans + return o.PublicIps } -// GetLansOk returns a tuple with the Lans field value +// GetPublicIpsOk returns a tuple with the PublicIps field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayProperties) GetLansOk() (*[]NatGatewayLanProperties, bool) { +func (o *NatGatewayProperties) GetPublicIpsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Lans, true + return o.PublicIps, true } -// SetLans sets field value -func (o *NatGatewayProperties) SetLans(v []NatGatewayLanProperties) { +// SetPublicIps sets field value +func (o *NatGatewayProperties) SetPublicIps(v []string) { - o.Lans = &v + o.PublicIps = &v } -// HasLans returns a boolean if a field has been set. -func (o *NatGatewayProperties) HasLans() bool { - if o != nil && o.Lans != nil { +// HasPublicIps returns a boolean if a field has been set. +func (o *NatGatewayProperties) HasPublicIps() bool { + if o != nil && o.PublicIps != nil { return true } @@ -161,15 +161,18 @@ func (o *NatGatewayProperties) HasLans() bool { func (o NatGatewayProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + if o.Lans != nil { + toSerialize["lans"] = o.Lans + } + if o.Name != nil { toSerialize["name"] = o.Name } + if o.PublicIps != nil { toSerialize["publicIps"] = o.PublicIps } - if o.Lans != nil { - toSerialize["lans"] = o.Lans - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_put.go index cf9a8a048..5bd37720e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_put.go @@ -16,13 +16,13 @@ import ( // NatGatewayPut struct for NatGatewayPut type NatGatewayPut struct { + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *NatGatewayProperties `json:"properties"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). 
- Href *string `json:"href,omitempty"` - Properties *NatGatewayProperties `json:"properties"` } // NewNatGatewayPut instantiates a new NatGatewayPut object @@ -45,152 +45,152 @@ func NewNatGatewayPutWithDefaults() *NatGatewayPut { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayPut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayPut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayPut) GetIdOk() (*string, bool) { +func (o *NatGatewayPut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *NatGatewayPut) SetId(v string) { +// SetHref sets field value +func (o *NatGatewayPut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *NatGatewayPut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *NatGatewayPut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NatGatewayPut) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayPut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayPut) GetTypeOk() (*Type, bool) { +func (o *NatGatewayPut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *NatGatewayPut) SetType(v Type) { +// SetId sets field value +func (o *NatGatewayPut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *NatGatewayPut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *NatGatewayPut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayPut) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayPut) GetProperties() *NatGatewayProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayPut) GetHrefOk() (*string, bool) { +func (o *NatGatewayPut) GetPropertiesOk() (*NatGatewayProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *NatGatewayPut) SetHref(v string) { +// SetProperties sets field value +func (o *NatGatewayPut) SetProperties(v NatGatewayProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *NatGatewayPut) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. +func (o *NatGatewayPut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NatGatewayProperties will be returned -func (o *NatGatewayPut) GetProperties() *NatGatewayProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayPut) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayPut) GetPropertiesOk() (*NatGatewayProperties, bool) { +func (o *NatGatewayPut) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *NatGatewayPut) SetProperties(v NatGatewayProperties) { +// SetType sets field value +func (o *NatGatewayPut) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *NatGatewayPut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *NatGatewayPut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *NatGatewayPut) HasProperties() bool { func (o NatGatewayPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule.go index 42d20aac8..4693555ca 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule.go @@ -16,14 +16,14 @@ import ( // NatGatewayRule struct for NatGatewayRule type NatGatewayRule struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. 
+ Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *NatGatewayRuleProperties `json:"properties"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewNatGatewayRule instantiates a new NatGatewayRule object @@ -46,190 +46,190 @@ func NewNatGatewayRuleWithDefaults() *NatGatewayRule { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayRule) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRule) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRule) GetIdOk() (*string, bool) { +func (o *NatGatewayRule) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *NatGatewayRule) SetId(v string) { +// SetHref sets field value +func (o *NatGatewayRule) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *NatGatewayRule) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *NatGatewayRule) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NatGatewayRule) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRule) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRule) GetTypeOk() (*Type, bool) { +func (o *NatGatewayRule) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *NatGatewayRule) SetType(v Type) { +// SetId sets field value +func (o *NatGatewayRule) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *NatGatewayRule) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *NatGatewayRule) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayRule) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRule) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRule) GetHrefOk() (*string, bool) { +func (o *NatGatewayRule) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *NatGatewayRule) SetHref(v string) { +// SetMetadata sets field value +func (o *NatGatewayRule) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *NatGatewayRule) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *NatGatewayRule) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *NatGatewayRule) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRule) GetProperties() *NatGatewayRuleProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRule) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *NatGatewayRule) GetPropertiesOk() (*NatGatewayRuleProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *NatGatewayRule) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *NatGatewayRule) SetProperties(v NatGatewayRuleProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *NatGatewayRule) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *NatGatewayRule) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NatGatewayRuleProperties will be returned -func (o *NatGatewayRule) GetProperties() *NatGatewayRuleProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRule) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRule) GetPropertiesOk() (*NatGatewayRuleProperties, bool) { +func (o *NatGatewayRule) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *NatGatewayRule) SetProperties(v NatGatewayRuleProperties) { +// SetType sets field value +func (o *NatGatewayRule) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. 
-func (o *NatGatewayRule) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *NatGatewayRule) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *NatGatewayRule) HasProperties() bool { func (o NatGatewayRule) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule_properties.go index 8b3da441e..32330acee 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule_properties.go @@ -18,29 +18,29 @@ import ( type NatGatewayRuleProperties struct { // The name of the NAT Gateway rule. Name *string `json:"name"` - // Type of the NAT Gateway rule. - Type *NatGatewayRuleType `json:"type,omitempty"` // Protocol of the NAT Gateway rule. Defaults to ALL. If protocol is 'ICMP' then targetPortRange start and end cannot be set. Protocol *NatGatewayRuleProtocol `json:"protocol,omitempty"` - // Source subnet of the NAT Gateway rule. For SNAT rules it specifies which packets this translation rule applies to based on the packets source IP address. - SourceSubnet *string `json:"sourceSubnet"` // Public IP address of the NAT Gateway rule. Specifies the address used for masking outgoing packets source address field. Should be one of the customer reserved IP address already configured on the NAT Gateway resource PublicIp *string `json:"publicIp"` - // Target or destination subnet of the NAT Gateway rule. For SNAT rules it specifies which packets this translation rule applies to based on the packets destination IP address. If none is provided, rule will match any address. - TargetSubnet *string `json:"targetSubnet,omitempty"` + // Source subnet of the NAT Gateway rule. For SNAT rules it specifies which packets this translation rule applies to based on the packets source IP address. + SourceSubnet *string `json:"sourceSubnet"` TargetPortRange *TargetPortRange `json:"targetPortRange,omitempty"` + // Target or destination subnet of the NAT Gateway rule. For SNAT rules it specifies which packets this translation rule applies to based on the packets destination IP address. If none is provided, rule will match any address. + TargetSubnet *string `json:"targetSubnet,omitempty"` + // Type of the NAT Gateway rule. 
+ Type *NatGatewayRuleType `json:"type,omitempty"` } // NewNatGatewayRuleProperties instantiates a new NatGatewayRuleProperties object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewNatGatewayRuleProperties(name string, sourceSubnet string, publicIp string) *NatGatewayRuleProperties { +func NewNatGatewayRuleProperties(name string, publicIp string, sourceSubnet string) *NatGatewayRuleProperties { this := NatGatewayRuleProperties{} this.Name = &name - this.SourceSubnet = &sourceSubnet this.PublicIp = &publicIp + this.SourceSubnet = &sourceSubnet return &this } @@ -54,7 +54,7 @@ func NewNatGatewayRulePropertiesWithDefaults() *NatGatewayRuleProperties { } // GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NatGatewayRuleProperties) GetName() *string { if o == nil { return nil @@ -91,76 +91,76 @@ func (o *NatGatewayRuleProperties) HasName() bool { return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for NatGatewayRuleType will be returned -func (o *NatGatewayRuleProperties) GetType() *NatGatewayRuleType { +// GetProtocol returns the Protocol field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRuleProperties) GetProtocol() *NatGatewayRuleProtocol { if o == nil { return nil } - return o.Type + return o.Protocol } -// GetTypeOk returns a tuple with the Type field value +// GetProtocolOk returns a tuple with the Protocol field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRuleProperties) GetTypeOk() (*NatGatewayRuleType, bool) { +func (o *NatGatewayRuleProperties) GetProtocolOk() (*NatGatewayRuleProtocol, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Protocol, true } -// SetType sets field value -func (o *NatGatewayRuleProperties) SetType(v NatGatewayRuleType) { +// SetProtocol sets field value +func (o *NatGatewayRuleProperties) SetProtocol(v NatGatewayRuleProtocol) { - o.Type = &v + o.Protocol = &v } -// HasType returns a boolean if a field has been set. -func (o *NatGatewayRuleProperties) HasType() bool { - if o != nil && o.Type != nil { +// HasProtocol returns a boolean if a field has been set. +func (o *NatGatewayRuleProperties) HasProtocol() bool { + if o != nil && o.Protocol != nil { return true } return false } -// GetProtocol returns the Protocol field value -// If the value is explicit nil, the zero value for NatGatewayRuleProtocol will be returned -func (o *NatGatewayRuleProperties) GetProtocol() *NatGatewayRuleProtocol { +// GetPublicIp returns the PublicIp field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRuleProperties) GetPublicIp() *string { if o == nil { return nil } - return o.Protocol + return o.PublicIp } -// GetProtocolOk returns a tuple with the Protocol field value +// GetPublicIpOk returns a tuple with the PublicIp field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRuleProperties) GetProtocolOk() (*NatGatewayRuleProtocol, bool) { +func (o *NatGatewayRuleProperties) GetPublicIpOk() (*string, bool) { if o == nil { return nil, false } - return o.Protocol, true + return o.PublicIp, true } -// SetProtocol sets field value -func (o *NatGatewayRuleProperties) SetProtocol(v NatGatewayRuleProtocol) { +// SetPublicIp sets field value +func (o *NatGatewayRuleProperties) SetPublicIp(v string) { - o.Protocol = &v + o.PublicIp = &v } -// HasProtocol returns a boolean if a field has been set. -func (o *NatGatewayRuleProperties) HasProtocol() bool { - if o != nil && o.Protocol != nil { +// HasPublicIp returns a boolean if a field has been set. +func (o *NatGatewayRuleProperties) HasPublicIp() bool { + if o != nil && o.PublicIp != nil { return true } @@ -168,7 +168,7 @@ func (o *NatGatewayRuleProperties) HasProtocol() bool { } // GetSourceSubnet returns the SourceSubnet field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NatGatewayRuleProperties) GetSourceSubnet() *string { if o == nil { return nil @@ -205,38 +205,38 @@ func (o *NatGatewayRuleProperties) HasSourceSubnet() bool { return false } -// GetPublicIp returns the PublicIp field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayRuleProperties) GetPublicIp() *string { +// GetTargetPortRange returns the TargetPortRange field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRuleProperties) GetTargetPortRange() *TargetPortRange { if o == nil { return nil } - return o.PublicIp + return o.TargetPortRange } -// GetPublicIpOk returns a tuple with the PublicIp field value +// GetTargetPortRangeOk returns a tuple with the TargetPortRange field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRuleProperties) GetPublicIpOk() (*string, bool) { +func (o *NatGatewayRuleProperties) GetTargetPortRangeOk() (*TargetPortRange, bool) { if o == nil { return nil, false } - return o.PublicIp, true + return o.TargetPortRange, true } -// SetPublicIp sets field value -func (o *NatGatewayRuleProperties) SetPublicIp(v string) { +// SetTargetPortRange sets field value +func (o *NatGatewayRuleProperties) SetTargetPortRange(v TargetPortRange) { - o.PublicIp = &v + o.TargetPortRange = &v } -// HasPublicIp returns a boolean if a field has been set. -func (o *NatGatewayRuleProperties) HasPublicIp() bool { - if o != nil && o.PublicIp != nil { +// HasTargetPortRange returns a boolean if a field has been set. 
+func (o *NatGatewayRuleProperties) HasTargetPortRange() bool { + if o != nil && o.TargetPortRange != nil { return true } @@ -244,7 +244,7 @@ func (o *NatGatewayRuleProperties) HasPublicIp() bool { } // GetTargetSubnet returns the TargetSubnet field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NatGatewayRuleProperties) GetTargetSubnet() *string { if o == nil { return nil @@ -281,38 +281,38 @@ func (o *NatGatewayRuleProperties) HasTargetSubnet() bool { return false } -// GetTargetPortRange returns the TargetPortRange field value -// If the value is explicit nil, the zero value for TargetPortRange will be returned -func (o *NatGatewayRuleProperties) GetTargetPortRange() *TargetPortRange { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRuleProperties) GetType() *NatGatewayRuleType { if o == nil { return nil } - return o.TargetPortRange + return o.Type } -// GetTargetPortRangeOk returns a tuple with the TargetPortRange field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRuleProperties) GetTargetPortRangeOk() (*TargetPortRange, bool) { +func (o *NatGatewayRuleProperties) GetTypeOk() (*NatGatewayRuleType, bool) { if o == nil { return nil, false } - return o.TargetPortRange, true + return o.Type, true } -// SetTargetPortRange sets field value -func (o *NatGatewayRuleProperties) SetTargetPortRange(v TargetPortRange) { +// SetType sets field value +func (o *NatGatewayRuleProperties) SetType(v NatGatewayRuleType) { - o.TargetPortRange = &v + o.Type = &v } -// HasTargetPortRange returns a boolean if a field has been set. -func (o *NatGatewayRuleProperties) HasTargetPortRange() bool { - if o != nil && o.TargetPortRange != nil { +// HasType returns a boolean if a field has been set. +func (o *NatGatewayRuleProperties) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -324,24 +324,31 @@ func (o NatGatewayRuleProperties) MarshalJSON() ([]byte, error) { if o.Name != nil { toSerialize["name"] = o.Name } - if o.Type != nil { - toSerialize["type"] = o.Type - } + if o.Protocol != nil { toSerialize["protocol"] = o.Protocol } + + if o.PublicIp != nil { + toSerialize["publicIp"] = o.PublicIp + } + if o.SourceSubnet != nil { toSerialize["sourceSubnet"] = o.SourceSubnet } - if o.PublicIp != nil { - toSerialize["publicIp"] = o.PublicIp + + if o.TargetPortRange != nil { + toSerialize["targetPortRange"] = o.TargetPortRange } + if o.TargetSubnet != nil { toSerialize["targetSubnet"] = o.TargetSubnet } - if o.TargetPortRange != nil { - toSerialize["targetPortRange"] = o.TargetPortRange + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule_put.go index 2f709351b..e0f92b409 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rule_put.go @@ -16,13 +16,13 @@ import ( // NatGatewayRulePut struct for NatGatewayRulePut type NatGatewayRulePut struct { + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. 
- Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *NatGatewayRuleProperties `json:"properties"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` - Properties *NatGatewayRuleProperties `json:"properties"` } // NewNatGatewayRulePut instantiates a new NatGatewayRulePut object @@ -45,152 +45,152 @@ func NewNatGatewayRulePutWithDefaults() *NatGatewayRulePut { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayRulePut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRulePut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRulePut) GetIdOk() (*string, bool) { +func (o *NatGatewayRulePut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *NatGatewayRulePut) SetId(v string) { +// SetHref sets field value +func (o *NatGatewayRulePut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *NatGatewayRulePut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *NatGatewayRulePut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NatGatewayRulePut) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRulePut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRulePut) GetTypeOk() (*Type, bool) { +func (o *NatGatewayRulePut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *NatGatewayRulePut) SetType(v Type) { +// SetId sets field value +func (o *NatGatewayRulePut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *NatGatewayRulePut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. 
+func (o *NatGatewayRulePut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayRulePut) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRulePut) GetProperties() *NatGatewayRuleProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRulePut) GetHrefOk() (*string, bool) { +func (o *NatGatewayRulePut) GetPropertiesOk() (*NatGatewayRuleProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *NatGatewayRulePut) SetHref(v string) { +// SetProperties sets field value +func (o *NatGatewayRulePut) SetProperties(v NatGatewayRuleProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *NatGatewayRulePut) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. +func (o *NatGatewayRulePut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NatGatewayRuleProperties will be returned -func (o *NatGatewayRulePut) GetProperties() *NatGatewayRuleProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRulePut) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRulePut) GetPropertiesOk() (*NatGatewayRuleProperties, bool) { +func (o *NatGatewayRulePut) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *NatGatewayRulePut) SetProperties(v NatGatewayRuleProperties) { +// SetType sets field value +func (o *NatGatewayRulePut) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *NatGatewayRulePut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *NatGatewayRulePut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *NatGatewayRulePut) HasProperties() bool { func (o NatGatewayRulePut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rules.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rules.go index 780bef1f9..9d3d41353 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rules.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateway_rules.go @@ -16,14 +16,14 @@ import ( // NatGatewayRules struct for NatGatewayRules type NatGatewayRules struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]NatGatewayRule `json:"items,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewNatGatewayRules instantiates a new NatGatewayRules object @@ -44,152 +44,152 @@ func NewNatGatewayRulesWithDefaults() *NatGatewayRules { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayRules) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRules) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRules) GetIdOk() (*string, bool) { +func (o *NatGatewayRules) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *NatGatewayRules) SetId(v string) { +// SetHref sets field value +func (o *NatGatewayRules) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *NatGatewayRules) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *NatGatewayRules) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NatGatewayRules) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRules) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRules) GetTypeOk() (*Type, bool) { +func (o *NatGatewayRules) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *NatGatewayRules) SetType(v Type) { +// SetId sets field value +func (o *NatGatewayRules) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *NatGatewayRules) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *NatGatewayRules) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGatewayRules) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRules) GetItems() *[]NatGatewayRule { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRules) GetHrefOk() (*string, bool) { +func (o *NatGatewayRules) GetItemsOk() (*[]NatGatewayRule, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *NatGatewayRules) SetHref(v string) { +// SetItems sets field value +func (o *NatGatewayRules) SetItems(v []NatGatewayRule) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *NatGatewayRules) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *NatGatewayRules) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []NatGatewayRule will be returned -func (o *NatGatewayRules) GetItems() *[]NatGatewayRule { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NatGatewayRules) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGatewayRules) GetItemsOk() (*[]NatGatewayRule, bool) { +func (o *NatGatewayRules) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *NatGatewayRules) SetItems(v []NatGatewayRule) { +// SetType sets field value +func (o *NatGatewayRules) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *NatGatewayRules) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. 
+func (o *NatGatewayRules) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *NatGatewayRules) HasItems() bool { func (o NatGatewayRules) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateways.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateways.go index ea80671fe..2139508cc 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateways.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nat_gateways.go @@ -16,19 +16,19 @@ import ( // NatGateways struct for NatGateways type NatGateways struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]NatGateway `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewNatGateways instantiates a new NatGateways object @@ -49,114 +49,114 @@ func NewNatGatewaysWithDefaults() *NatGateways { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGateways) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *NatGateways) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateways) GetIdOk() (*string, bool) { +func (o *NatGateways) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *NatGateways) SetId(v string) { +// SetLinks sets field value +func (o *NatGateways) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *NatGateways) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. 
+func (o *NatGateways) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NatGateways) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NatGateways) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateways) GetTypeOk() (*Type, bool) { +func (o *NatGateways) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *NatGateways) SetType(v Type) { +// SetHref sets field value +func (o *NatGateways) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *NatGateways) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *NatGateways) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NatGateways) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NatGateways) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateways) GetHrefOk() (*string, bool) { +func (o *NatGateways) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *NatGateways) SetHref(v string) { +// SetId sets field value +func (o *NatGateways) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *NatGateways) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *NatGateways) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *NatGateways) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []NatGateway will be returned +// If the value is explicit nil, nil is returned func (o *NatGateways) GetItems() *[]NatGateway { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *NatGateways) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *NatGateways) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *NatGateways) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateways) GetOffsetOk() (*float32, bool) { +func (o *NatGateways) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *NatGateways) SetOffset(v float32) { +// SetLimit sets field value +func (o *NatGateways) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *NatGateways) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *NatGateways) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *NatGateways) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *NatGateways) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateways) GetLimitOk() (*float32, bool) { +func (o *NatGateways) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *NatGateways) SetLimit(v float32) { +// SetOffset sets field value +func (o *NatGateways) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *NatGateways) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *NatGateways) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *NatGateways) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NatGateways) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NatGateways) GetLinksOk() (*PaginationLinks, bool) { +func (o *NatGateways) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *NatGateways) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *NatGateways) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *NatGateways) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. 
+func (o *NatGateways) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *NatGateways) HasLinks() bool { func (o NatGateways) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer.go index 265e075fa..efaed067d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer.go @@ -16,15 +16,15 @@ import ( // NetworkLoadBalancer struct for NetworkLoadBalancer type NetworkLoadBalancer struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Entities *NetworkLoadBalancerEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *NetworkLoadBalancerProperties `json:"properties"` - Entities *NetworkLoadBalancerEntities `json:"entities,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewNetworkLoadBalancer instantiates a new NetworkLoadBalancer object @@ -47,114 +47,114 @@ func NewNetworkLoadBalancerWithDefaults() *NetworkLoadBalancer { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancer) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancer) GetEntities() *NetworkLoadBalancerEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancer) GetIdOk() (*string, bool) { +func (o *NetworkLoadBalancer) GetEntitiesOk() (*NetworkLoadBalancerEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *NetworkLoadBalancer) SetId(v string) { +// SetEntities sets field value +func (o *NetworkLoadBalancer) SetEntities(v NetworkLoadBalancerEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *NetworkLoadBalancer) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancer) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NetworkLoadBalancer) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancer) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancer) GetTypeOk() (*Type, bool) { +func (o *NetworkLoadBalancer) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *NetworkLoadBalancer) SetType(v Type) { +// SetHref sets field value +func (o *NetworkLoadBalancer) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *NetworkLoadBalancer) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *NetworkLoadBalancer) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancer) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancer) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancer) GetHrefOk() (*string, bool) { +func (o *NetworkLoadBalancer) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *NetworkLoadBalancer) SetHref(v string) { +// SetId sets field value +func (o *NetworkLoadBalancer) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *NetworkLoadBalancer) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancer) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *NetworkLoadBalancer) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancer) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *NetworkLoadBalancer) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NetworkLoadBalancerProperties will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancer) GetProperties() *NetworkLoadBalancerProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *NetworkLoadBalancer) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for NetworkLoadBalancerEntities will be returned -func (o *NetworkLoadBalancer) GetEntities() *NetworkLoadBalancerEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancer) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancer) GetEntitiesOk() (*NetworkLoadBalancerEntities, bool) { +func (o *NetworkLoadBalancer) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *NetworkLoadBalancer) SetEntities(v NetworkLoadBalancerEntities) { +// SetType sets field value +func (o *NetworkLoadBalancer) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *NetworkLoadBalancer) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancer) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *NetworkLoadBalancer) HasEntities() bool { func (o NetworkLoadBalancer) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_entities.go index 85dd3bbd6..cdd74071c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_entities.go @@ -39,7 +39,7 @@ func NewNetworkLoadBalancerEntitiesWithDefaults() *NetworkLoadBalancerEntities { } // GetFlowlogs returns the Flowlogs field value -// If the value is explicit nil, the zero value for FlowLogs will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerEntities) GetFlowlogs() *FlowLogs { if o == nil { return nil @@ -77,7 +77,7 @@ func (o *NetworkLoadBalancerEntities) HasFlowlogs() bool { } // GetForwardingrules returns the Forwardingrules field value -// If the value is explicit nil, the zero value for NetworkLoadBalancerForwardingRules will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerEntities) GetForwardingrules() *NetworkLoadBalancerForwardingRules { if o == nil { return nil @@ -119,9 +119,11 @@ func (o NetworkLoadBalancerEntities) MarshalJSON() ([]byte, error) { if o.Flowlogs != nil { toSerialize["flowlogs"] = o.Flowlogs } + if o.Forwardingrules != nil { toSerialize["forwardingrules"] = o.Forwardingrules } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule.go index 8f054c885..d5018315f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule.go @@ -16,14 +16,14 @@ import ( // NetworkLoadBalancerForwardingRule struct for NetworkLoadBalancerForwardingRule type NetworkLoadBalancerForwardingRule struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *NetworkLoadBalancerForwardingRuleProperties `json:"properties"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewNetworkLoadBalancerForwardingRule instantiates a new NetworkLoadBalancerForwardingRule object @@ -46,190 +46,190 @@ func NewNetworkLoadBalancerForwardingRuleWithDefaults() *NetworkLoadBalancerForw return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerForwardingRule) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRule) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRule) GetIdOk() (*string, bool) { +func (o *NetworkLoadBalancerForwardingRule) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *NetworkLoadBalancerForwardingRule) SetId(v string) { +// SetHref sets field value +func (o *NetworkLoadBalancerForwardingRule) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRule) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRule) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NetworkLoadBalancerForwardingRule) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRule) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRule) GetTypeOk() (*Type, bool) { +func (o *NetworkLoadBalancerForwardingRule) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *NetworkLoadBalancerForwardingRule) SetType(v Type) { +// SetId sets field value +func (o *NetworkLoadBalancerForwardingRule) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRule) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerForwardingRule) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerForwardingRule) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRule) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRule) GetHrefOk() (*string, bool) { +func (o *NetworkLoadBalancerForwardingRule) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *NetworkLoadBalancerForwardingRule) SetHref(v string) { +// SetMetadata sets field value +func (o *NetworkLoadBalancerForwardingRule) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRule) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRule) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *NetworkLoadBalancerForwardingRule) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRule) GetProperties() *NetworkLoadBalancerForwardingRuleProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRule) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *NetworkLoadBalancerForwardingRule) GetPropertiesOk() (*NetworkLoadBalancerForwardingRuleProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *NetworkLoadBalancerForwardingRule) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *NetworkLoadBalancerForwardingRule) SetProperties(v NetworkLoadBalancerForwardingRuleProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRule) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerForwardingRule) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NetworkLoadBalancerForwardingRuleProperties will be returned -func (o *NetworkLoadBalancerForwardingRule) GetProperties() *NetworkLoadBalancerForwardingRuleProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRule) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRule) GetPropertiesOk() (*NetworkLoadBalancerForwardingRuleProperties, bool) { +func (o *NetworkLoadBalancerForwardingRule) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *NetworkLoadBalancerForwardingRule) SetProperties(v NetworkLoadBalancerForwardingRuleProperties) { +// SetType sets field value +func (o *NetworkLoadBalancerForwardingRule) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRule) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRule) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *NetworkLoadBalancerForwardingRule) HasProperties() bool { func (o NetworkLoadBalancerForwardingRule) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_health_check.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_health_check.go index 3fa9df4f4..cd16a13ef 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_health_check.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_health_check.go @@ -20,10 +20,10 @@ type NetworkLoadBalancerForwardingRuleHealthCheck struct { ClientTimeout *int32 `json:"clientTimeout,omitempty"` // The maximum time in milliseconds to wait for a connection attempt to a target to succeed; default is 5000 (five seconds). ConnectTimeout *int32 `json:"connectTimeout,omitempty"` - // The maximum time in milliseconds that a target can remain inactive; default is 50,000 (50 seconds). - TargetTimeout *int32 `json:"targetTimeout,omitempty"` // The maximum number of attempts to reconnect to a target after a connection failure. Valid range is 0 to 65535 and default is three reconnection attempts. 
Retries *int32 `json:"retries,omitempty"` + // The maximum time in milliseconds that a target can remain inactive; default is 50,000 (50 seconds). + TargetTimeout *int32 `json:"targetTimeout,omitempty"` } // NewNetworkLoadBalancerForwardingRuleHealthCheck instantiates a new NetworkLoadBalancerForwardingRuleHealthCheck object @@ -45,7 +45,7 @@ func NewNetworkLoadBalancerForwardingRuleHealthCheckWithDefaults() *NetworkLoadB } // GetClientTimeout returns the ClientTimeout field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetClientTimeout() *int32 { if o == nil { return nil @@ -83,7 +83,7 @@ func (o *NetworkLoadBalancerForwardingRuleHealthCheck) HasClientTimeout() bool { } // GetConnectTimeout returns the ConnectTimeout field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetConnectTimeout() *int32 { if o == nil { return nil @@ -120,76 +120,76 @@ func (o *NetworkLoadBalancerForwardingRuleHealthCheck) HasConnectTimeout() bool return false } -// GetTargetTimeout returns the TargetTimeout field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetTargetTimeout() *int32 { +// GetRetries returns the Retries field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetRetries() *int32 { if o == nil { return nil } - return o.TargetTimeout + return o.Retries } -// GetTargetTimeoutOk returns a tuple with the TargetTimeout field value +// GetRetriesOk returns a tuple with the Retries field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetTargetTimeoutOk() (*int32, bool) { +func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetRetriesOk() (*int32, bool) { if o == nil { return nil, false } - return o.TargetTimeout, true + return o.Retries, true } -// SetTargetTimeout sets field value -func (o *NetworkLoadBalancerForwardingRuleHealthCheck) SetTargetTimeout(v int32) { +// SetRetries sets field value +func (o *NetworkLoadBalancerForwardingRuleHealthCheck) SetRetries(v int32) { - o.TargetTimeout = &v + o.Retries = &v } -// HasTargetTimeout returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRuleHealthCheck) HasTargetTimeout() bool { - if o != nil && o.TargetTimeout != nil { +// HasRetries returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRuleHealthCheck) HasRetries() bool { + if o != nil && o.Retries != nil { return true } return false } -// GetRetries returns the Retries field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetRetries() *int32 { +// GetTargetTimeout returns the TargetTimeout field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetTargetTimeout() *int32 { if o == nil { return nil } - return o.Retries + return o.TargetTimeout } -// GetRetriesOk returns a tuple with the Retries field value +// GetTargetTimeoutOk returns a tuple with the TargetTimeout field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetRetriesOk() (*int32, bool) { +func (o *NetworkLoadBalancerForwardingRuleHealthCheck) GetTargetTimeoutOk() (*int32, bool) { if o == nil { return nil, false } - return o.Retries, true + return o.TargetTimeout, true } -// SetRetries sets field value -func (o *NetworkLoadBalancerForwardingRuleHealthCheck) SetRetries(v int32) { +// SetTargetTimeout sets field value +func (o *NetworkLoadBalancerForwardingRuleHealthCheck) SetTargetTimeout(v int32) { - o.Retries = &v + o.TargetTimeout = &v } -// HasRetries returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRuleHealthCheck) HasRetries() bool { - if o != nil && o.Retries != nil { +// HasTargetTimeout returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRuleHealthCheck) HasTargetTimeout() bool { + if o != nil && o.TargetTimeout != nil { return true } @@ -201,15 +201,19 @@ func (o NetworkLoadBalancerForwardingRuleHealthCheck) MarshalJSON() ([]byte, err if o.ClientTimeout != nil { toSerialize["clientTimeout"] = o.ClientTimeout } + if o.ConnectTimeout != nil { toSerialize["connectTimeout"] = o.ConnectTimeout } - if o.TargetTimeout != nil { - toSerialize["targetTimeout"] = o.TargetTimeout - } + if o.Retries != nil { toSerialize["retries"] = o.Retries } + + if o.TargetTimeout != nil { + toSerialize["targetTimeout"] = o.TargetTimeout + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_properties.go index 35e452787..c3131afa1 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_properties.go @@ -16,17 +16,17 @@ import ( // NetworkLoadBalancerForwardingRuleProperties struct for NetworkLoadBalancerForwardingRuleProperties type NetworkLoadBalancerForwardingRuleProperties struct { - // The name of the Network Load Balancer forwarding rule. - Name *string `json:"name"` // Balancing algorithm - Algorithm *string `json:"algorithm"` - // Balancing protocol - Protocol *string `json:"protocol"` + Algorithm *string `json:"algorithm"` + HealthCheck *NetworkLoadBalancerForwardingRuleHealthCheck `json:"healthCheck,omitempty"` // Listening (inbound) IP. ListenerIp *string `json:"listenerIp"` // Listening (inbound) port number; valid range is 1 to 65535. - ListenerPort *int32 `json:"listenerPort"` - HealthCheck *NetworkLoadBalancerForwardingRuleHealthCheck `json:"healthCheck,omitempty"` + ListenerPort *int32 `json:"listenerPort"` + // The name of the Network Load Balancer forwarding rule. + Name *string `json:"name"` + // Balancing protocol + Protocol *string `json:"protocol"` // Array of items in the collection. 
Targets *[]NetworkLoadBalancerForwardingRuleTarget `json:"targets"` } @@ -35,14 +35,14 @@ type NetworkLoadBalancerForwardingRuleProperties struct { // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewNetworkLoadBalancerForwardingRuleProperties(name string, algorithm string, protocol string, listenerIp string, listenerPort int32, targets []NetworkLoadBalancerForwardingRuleTarget) *NetworkLoadBalancerForwardingRuleProperties { +func NewNetworkLoadBalancerForwardingRuleProperties(algorithm string, listenerIp string, listenerPort int32, name string, protocol string, targets []NetworkLoadBalancerForwardingRuleTarget) *NetworkLoadBalancerForwardingRuleProperties { this := NetworkLoadBalancerForwardingRuleProperties{} - this.Name = &name this.Algorithm = &algorithm - this.Protocol = &protocol this.ListenerIp = &listenerIp this.ListenerPort = &listenerPort + this.Name = &name + this.Protocol = &protocol this.Targets = &targets return &this @@ -56,46 +56,8 @@ func NewNetworkLoadBalancerForwardingRulePropertiesWithDefaults() *NetworkLoadBa return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerForwardingRuleProperties) GetName() *string { - if o == nil { - return nil - } - - return o.Name - -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRuleProperties) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Name, true -} - -// SetName sets field value -func (o *NetworkLoadBalancerForwardingRuleProperties) SetName(v string) { - - o.Name = &v - -} - -// HasName returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRuleProperties) HasName() bool { - if o != nil && o.Name != nil { - return true - } - - return false -} - // GetAlgorithm returns the Algorithm field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleProperties) GetAlgorithm() *string { if o == nil { return nil @@ -132,38 +94,38 @@ func (o *NetworkLoadBalancerForwardingRuleProperties) HasAlgorithm() bool { return false } -// GetProtocol returns the Protocol field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerForwardingRuleProperties) GetProtocol() *string { +// GetHealthCheck returns the HealthCheck field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRuleProperties) GetHealthCheck() *NetworkLoadBalancerForwardingRuleHealthCheck { if o == nil { return nil } - return o.Protocol + return o.HealthCheck } -// GetProtocolOk returns a tuple with the Protocol field value +// GetHealthCheckOk returns a tuple with the HealthCheck field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRuleProperties) GetProtocolOk() (*string, bool) { +func (o *NetworkLoadBalancerForwardingRuleProperties) GetHealthCheckOk() (*NetworkLoadBalancerForwardingRuleHealthCheck, bool) { if o == nil { return nil, false } - return o.Protocol, true + return o.HealthCheck, true } -// SetProtocol sets field value -func (o *NetworkLoadBalancerForwardingRuleProperties) SetProtocol(v string) { +// SetHealthCheck sets field value +func (o *NetworkLoadBalancerForwardingRuleProperties) SetHealthCheck(v NetworkLoadBalancerForwardingRuleHealthCheck) { - o.Protocol = &v + o.HealthCheck = &v } -// HasProtocol returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRuleProperties) HasProtocol() bool { - if o != nil && o.Protocol != nil { +// HasHealthCheck returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRuleProperties) HasHealthCheck() bool { + if o != nil && o.HealthCheck != nil { return true } @@ -171,7 +133,7 @@ func (o *NetworkLoadBalancerForwardingRuleProperties) HasProtocol() bool { } // GetListenerIp returns the ListenerIp field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleProperties) GetListenerIp() *string { if o == nil { return nil @@ -209,7 +171,7 @@ func (o *NetworkLoadBalancerForwardingRuleProperties) HasListenerIp() bool { } // GetListenerPort returns the ListenerPort field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleProperties) GetListenerPort() *int32 { if o == nil { return nil @@ -246,38 +208,76 @@ func (o *NetworkLoadBalancerForwardingRuleProperties) HasListenerPort() bool { return false } -// GetHealthCheck returns the HealthCheck field value -// If the value is explicit nil, the zero value for NetworkLoadBalancerForwardingRuleHealthCheck will be returned -func (o *NetworkLoadBalancerForwardingRuleProperties) GetHealthCheck() *NetworkLoadBalancerForwardingRuleHealthCheck { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRuleProperties) GetName() *string { if o == nil { return nil } - return o.HealthCheck + return o.Name } -// GetHealthCheckOk returns a tuple with the HealthCheck field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRuleProperties) GetHealthCheckOk() (*NetworkLoadBalancerForwardingRuleHealthCheck, bool) { +func (o *NetworkLoadBalancerForwardingRuleProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.HealthCheck, true + return o.Name, true } -// SetHealthCheck sets field value -func (o *NetworkLoadBalancerForwardingRuleProperties) SetHealthCheck(v NetworkLoadBalancerForwardingRuleHealthCheck) { +// SetName sets field value +func (o *NetworkLoadBalancerForwardingRuleProperties) SetName(v string) { - o.HealthCheck = &v + o.Name = &v } -// HasHealthCheck returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRuleProperties) HasHealthCheck() bool { - if o != nil && o.HealthCheck != nil { +// HasName returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerForwardingRuleProperties) HasName() bool { + if o != nil && o.Name != nil { + return true + } + + return false +} + +// GetProtocol returns the Protocol field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRuleProperties) GetProtocol() *string { + if o == nil { + return nil + } + + return o.Protocol + +} + +// GetProtocolOk returns a tuple with the Protocol field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *NetworkLoadBalancerForwardingRuleProperties) GetProtocolOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Protocol, true +} + +// SetProtocol sets field value +func (o *NetworkLoadBalancerForwardingRuleProperties) SetProtocol(v string) { + + o.Protocol = &v + +} + +// HasProtocol returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRuleProperties) HasProtocol() bool { + if o != nil && o.Protocol != nil { return true } @@ -285,7 +285,7 @@ func (o *NetworkLoadBalancerForwardingRuleProperties) HasHealthCheck() bool { } // GetTargets returns the Targets field value -// If the value is explicit nil, the zero value for []NetworkLoadBalancerForwardingRuleTarget will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleProperties) GetTargets() *[]NetworkLoadBalancerForwardingRuleTarget { if o == nil { return nil @@ -324,27 +324,34 @@ func (o *NetworkLoadBalancerForwardingRuleProperties) HasTargets() bool { func (o NetworkLoadBalancerForwardingRuleProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } if o.Algorithm != nil { toSerialize["algorithm"] = o.Algorithm } - if o.Protocol != nil { - toSerialize["protocol"] = o.Protocol + + if o.HealthCheck != nil { + toSerialize["healthCheck"] = o.HealthCheck } + if o.ListenerIp != nil { toSerialize["listenerIp"] = o.ListenerIp } + if o.ListenerPort != nil { toSerialize["listenerPort"] = o.ListenerPort } - if o.HealthCheck != nil { - toSerialize["healthCheck"] = o.HealthCheck + + if o.Name != nil { + toSerialize["name"] = o.Name } + + if o.Protocol != nil { + toSerialize["protocol"] = o.Protocol + } + if o.Targets != nil { toSerialize["targets"] = o.Targets } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_put.go index ee8f95aea..16c6e39bb 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_put.go @@ -16,13 +16,13 @@ import ( // NetworkLoadBalancerForwardingRulePut struct for NetworkLoadBalancerForwardingRulePut type NetworkLoadBalancerForwardingRulePut struct { + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *NetworkLoadBalancerForwardingRuleProperties `json:"properties"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). 
- Href *string `json:"href,omitempty"` - Properties *NetworkLoadBalancerForwardingRuleProperties `json:"properties"` } // NewNetworkLoadBalancerForwardingRulePut instantiates a new NetworkLoadBalancerForwardingRulePut object @@ -45,152 +45,152 @@ func NewNetworkLoadBalancerForwardingRulePutWithDefaults() *NetworkLoadBalancerF return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerForwardingRulePut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRulePut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRulePut) GetIdOk() (*string, bool) { +func (o *NetworkLoadBalancerForwardingRulePut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *NetworkLoadBalancerForwardingRulePut) SetId(v string) { +// SetHref sets field value +func (o *NetworkLoadBalancerForwardingRulePut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRulePut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRulePut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NetworkLoadBalancerForwardingRulePut) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRulePut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRulePut) GetTypeOk() (*Type, bool) { +func (o *NetworkLoadBalancerForwardingRulePut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *NetworkLoadBalancerForwardingRulePut) SetType(v Type) { +// SetId sets field value +func (o *NetworkLoadBalancerForwardingRulePut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRulePut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerForwardingRulePut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerForwardingRulePut) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRulePut) GetProperties() *NetworkLoadBalancerForwardingRuleProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRulePut) GetHrefOk() (*string, bool) { +func (o *NetworkLoadBalancerForwardingRulePut) GetPropertiesOk() (*NetworkLoadBalancerForwardingRuleProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *NetworkLoadBalancerForwardingRulePut) SetHref(v string) { +// SetProperties sets field value +func (o *NetworkLoadBalancerForwardingRulePut) SetProperties(v NetworkLoadBalancerForwardingRuleProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRulePut) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRulePut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NetworkLoadBalancerForwardingRuleProperties will be returned -func (o *NetworkLoadBalancerForwardingRulePut) GetProperties() *NetworkLoadBalancerForwardingRuleProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRulePut) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRulePut) GetPropertiesOk() (*NetworkLoadBalancerForwardingRuleProperties, bool) { +func (o *NetworkLoadBalancerForwardingRulePut) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *NetworkLoadBalancerForwardingRulePut) SetProperties(v NetworkLoadBalancerForwardingRuleProperties) { +// SetType sets field value +func (o *NetworkLoadBalancerForwardingRulePut) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRulePut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerForwardingRulePut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *NetworkLoadBalancerForwardingRulePut) HasProperties() bool { func (o NetworkLoadBalancerForwardingRulePut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_target.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_target.go index 502d5bda8..b1a9e4482 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_target.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_target.go @@ -16,13 +16,13 @@ import ( // NetworkLoadBalancerForwardingRuleTarget struct for NetworkLoadBalancerForwardingRuleTarget type NetworkLoadBalancerForwardingRuleTarget struct { + HealthCheck *NetworkLoadBalancerForwardingRuleTargetHealthCheck `json:"healthCheck,omitempty"` // The IP of the balanced target VM. Ip *string `json:"ip"` // The port of the balanced target service; valid range is 1 to 65535. Port *int32 `json:"port"` // Traffic is distributed in proportion to target weight, relative to the combined weight of all targets. A target with higher weight receives a greater share of traffic. Valid range is 0 to 256 and default is 1. Targets with weight of 0 do not participate in load balancing but still accept persistent connections. It is best to assign weights in the middle of the range to leave room for later adjustments. - Weight *int32 `json:"weight"` - HealthCheck *NetworkLoadBalancerForwardingRuleTargetHealthCheck `json:"healthCheck,omitempty"` + Weight *int32 `json:"weight"` } // NewNetworkLoadBalancerForwardingRuleTarget instantiates a new NetworkLoadBalancerForwardingRuleTarget object @@ -47,8 +47,46 @@ func NewNetworkLoadBalancerForwardingRuleTargetWithDefaults() *NetworkLoadBalanc return &this } +// GetHealthCheck returns the HealthCheck field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRuleTarget) GetHealthCheck() *NetworkLoadBalancerForwardingRuleTargetHealthCheck { + if o == nil { + return nil + } + + return o.HealthCheck + +} + +// GetHealthCheckOk returns a tuple with the HealthCheck field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *NetworkLoadBalancerForwardingRuleTarget) GetHealthCheckOk() (*NetworkLoadBalancerForwardingRuleTargetHealthCheck, bool) { + if o == nil { + return nil, false + } + + return o.HealthCheck, true +} + +// SetHealthCheck sets field value +func (o *NetworkLoadBalancerForwardingRuleTarget) SetHealthCheck(v NetworkLoadBalancerForwardingRuleTargetHealthCheck) { + + o.HealthCheck = &v + +} + +// HasHealthCheck returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerForwardingRuleTarget) HasHealthCheck() bool { + if o != nil && o.HealthCheck != nil { + return true + } + + return false +} + // GetIp returns the Ip field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleTarget) GetIp() *string { if o == nil { return nil @@ -86,7 +124,7 @@ func (o *NetworkLoadBalancerForwardingRuleTarget) HasIp() bool { } // GetPort returns the Port field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleTarget) GetPort() *int32 { if o == nil { return nil @@ -124,7 +162,7 @@ func (o *NetworkLoadBalancerForwardingRuleTarget) HasPort() bool { } // GetWeight returns the Weight field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleTarget) GetWeight() *int32 { if o == nil { return nil @@ -161,58 +199,24 @@ func (o *NetworkLoadBalancerForwardingRuleTarget) HasWeight() bool { return false } -// GetHealthCheck returns the HealthCheck field value -// If the value is explicit nil, the zero value for NetworkLoadBalancerForwardingRuleTargetHealthCheck will be returned -func (o *NetworkLoadBalancerForwardingRuleTarget) GetHealthCheck() *NetworkLoadBalancerForwardingRuleTargetHealthCheck { - if o == nil { - return nil - } - - return o.HealthCheck - -} - -// GetHealthCheckOk returns a tuple with the HealthCheck field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRuleTarget) GetHealthCheckOk() (*NetworkLoadBalancerForwardingRuleTargetHealthCheck, bool) { - if o == nil { - return nil, false - } - - return o.HealthCheck, true -} - -// SetHealthCheck sets field value -func (o *NetworkLoadBalancerForwardingRuleTarget) SetHealthCheck(v NetworkLoadBalancerForwardingRuleTargetHealthCheck) { - - o.HealthCheck = &v - -} - -// HasHealthCheck returns a boolean if a field has been set. 
-func (o *NetworkLoadBalancerForwardingRuleTarget) HasHealthCheck() bool { - if o != nil && o.HealthCheck != nil { - return true - } - - return false -} - func (o NetworkLoadBalancerForwardingRuleTarget) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + if o.HealthCheck != nil { + toSerialize["healthCheck"] = o.HealthCheck + } + if o.Ip != nil { toSerialize["ip"] = o.Ip } + if o.Port != nil { toSerialize["port"] = o.Port } + if o.Weight != nil { toSerialize["weight"] = o.Weight } - if o.HealthCheck != nil { - toSerialize["healthCheck"] = o.HealthCheck - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_target_health_check.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_target_health_check.go index 8fac2b6db..8d7e994a3 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_target_health_check.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_target_health_check.go @@ -43,7 +43,7 @@ func NewNetworkLoadBalancerForwardingRuleTargetHealthCheckWithDefaults() *Networ } // GetCheck returns the Check field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleTargetHealthCheck) GetCheck() *bool { if o == nil { return nil @@ -81,7 +81,7 @@ func (o *NetworkLoadBalancerForwardingRuleTargetHealthCheck) HasCheck() bool { } // GetCheckInterval returns the CheckInterval field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleTargetHealthCheck) GetCheckInterval() *int32 { if o == nil { return nil @@ -119,7 +119,7 @@ func (o *NetworkLoadBalancerForwardingRuleTargetHealthCheck) HasCheckInterval() } // GetMaintenance returns the Maintenance field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRuleTargetHealthCheck) GetMaintenance() *bool { if o == nil { return nil @@ -161,12 +161,15 @@ func (o NetworkLoadBalancerForwardingRuleTargetHealthCheck) MarshalJSON() ([]byt if o.Check != nil { toSerialize["check"] = o.Check } + if o.CheckInterval != nil { toSerialize["checkInterval"] = o.CheckInterval } + if o.Maintenance != nil { toSerialize["maintenance"] = o.Maintenance } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rules.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rules.go index a23c77408..2668d6328 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rules.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rules.go @@ -16,19 +16,19 @@ import ( // NetworkLoadBalancerForwardingRules struct for NetworkLoadBalancerForwardingRules type NetworkLoadBalancerForwardingRules struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. 
+ Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]NetworkLoadBalancerForwardingRule `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewNetworkLoadBalancerForwardingRules instantiates a new NetworkLoadBalancerForwardingRules object @@ -49,114 +49,114 @@ func NewNetworkLoadBalancerForwardingRulesWithDefaults() *NetworkLoadBalancerFor return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerForwardingRules) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRules) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRules) GetIdOk() (*string, bool) { +func (o *NetworkLoadBalancerForwardingRules) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *NetworkLoadBalancerForwardingRules) SetId(v string) { +// SetLinks sets field value +func (o *NetworkLoadBalancerForwardingRules) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRules) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRules) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NetworkLoadBalancerForwardingRules) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRules) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRules) GetTypeOk() (*Type, bool) { +func (o *NetworkLoadBalancerForwardingRules) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *NetworkLoadBalancerForwardingRules) SetType(v Type) { +// SetHref sets field value +func (o *NetworkLoadBalancerForwardingRules) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRules) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerForwardingRules) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerForwardingRules) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRules) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRules) GetHrefOk() (*string, bool) { +func (o *NetworkLoadBalancerForwardingRules) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *NetworkLoadBalancerForwardingRules) SetHref(v string) { +// SetId sets field value +func (o *NetworkLoadBalancerForwardingRules) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRules) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRules) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *NetworkLoadBalancerForwardingRules) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []NetworkLoadBalancerForwardingRule will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerForwardingRules) GetItems() *[]NetworkLoadBalancerForwardingRule { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *NetworkLoadBalancerForwardingRules) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *NetworkLoadBalancerForwardingRules) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRules) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRules) GetOffsetOk() (*float32, bool) { +func (o *NetworkLoadBalancerForwardingRules) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *NetworkLoadBalancerForwardingRules) SetOffset(v float32) { +// SetLimit sets field value +func (o *NetworkLoadBalancerForwardingRules) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRules) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerForwardingRules) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *NetworkLoadBalancerForwardingRules) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRules) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRules) GetLimitOk() (*float32, bool) { +func (o *NetworkLoadBalancerForwardingRules) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *NetworkLoadBalancerForwardingRules) SetLimit(v float32) { +// SetOffset sets field value +func (o *NetworkLoadBalancerForwardingRules) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRules) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *NetworkLoadBalancerForwardingRules) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *NetworkLoadBalancerForwardingRules) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerForwardingRules) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerForwardingRules) GetLinksOk() (*PaginationLinks, bool) { +func (o *NetworkLoadBalancerForwardingRules) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *NetworkLoadBalancerForwardingRules) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *NetworkLoadBalancerForwardingRules) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *NetworkLoadBalancerForwardingRules) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerForwardingRules) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *NetworkLoadBalancerForwardingRules) HasLinks() bool { func (o NetworkLoadBalancerForwardingRules) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_properties.go index 94170d456..114b8b08a 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_properties.go @@ -16,27 +16,27 @@ import ( // NetworkLoadBalancerProperties struct for NetworkLoadBalancerProperties type NetworkLoadBalancerProperties struct { - // The name of the Network Load Balancer. - Name *string `json:"name"` - // ID of the listening LAN (inbound). - ListenerLan *int32 `json:"listenerLan"` // Collection of the Network Load Balancer IP addresses. (Inbound and outbound) IPs of the listenerLan must be customer-reserved IPs for public Load Balancers, and private IPs for private Load Balancers. Ips *[]string `json:"ips,omitempty"` - // ID of the balanced private target LAN (outbound). - TargetLan *int32 `json:"targetLan"` // Collection of private IP addresses with subnet mask of the Network Load Balancer. IPs must contain a valid subnet mask. If no IP is provided, the system will generate an IP with /24 subnet. LbPrivateIps *[]string `json:"lbPrivateIps,omitempty"` + // ID of the listening LAN (inbound). + ListenerLan *int32 `json:"listenerLan"` + // The name of the Network Load Balancer. + Name *string `json:"name"` + // ID of the balanced private target LAN (outbound). 
+ TargetLan *int32 `json:"targetLan"` } // NewNetworkLoadBalancerProperties instantiates a new NetworkLoadBalancerProperties object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewNetworkLoadBalancerProperties(name string, listenerLan int32, targetLan int32) *NetworkLoadBalancerProperties { +func NewNetworkLoadBalancerProperties(listenerLan int32, name string, targetLan int32) *NetworkLoadBalancerProperties { this := NetworkLoadBalancerProperties{} - this.Name = &name this.ListenerLan = &listenerLan + this.Name = &name this.TargetLan = &targetLan return &this @@ -50,38 +50,76 @@ func NewNetworkLoadBalancerPropertiesWithDefaults() *NetworkLoadBalancerProperti return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerProperties) GetName() *string { +// GetIps returns the Ips field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerProperties) GetIps() *[]string { if o == nil { return nil } - return o.Name + return o.Ips } -// GetNameOk returns a tuple with the Name field value +// GetIpsOk returns a tuple with the Ips field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerProperties) GetNameOk() (*string, bool) { +func (o *NetworkLoadBalancerProperties) GetIpsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Ips, true } -// SetName sets field value -func (o *NetworkLoadBalancerProperties) SetName(v string) { +// SetIps sets field value +func (o *NetworkLoadBalancerProperties) SetIps(v []string) { - o.Name = &v + o.Ips = &v } -// HasName returns a boolean if a field has been set. -func (o *NetworkLoadBalancerProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasIps returns a boolean if a field has been set. +func (o *NetworkLoadBalancerProperties) HasIps() bool { + if o != nil && o.Ips != nil { + return true + } + + return false +} + +// GetLbPrivateIps returns the LbPrivateIps field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerProperties) GetLbPrivateIps() *[]string { + if o == nil { + return nil + } + + return o.LbPrivateIps + +} + +// GetLbPrivateIpsOk returns a tuple with the LbPrivateIps field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *NetworkLoadBalancerProperties) GetLbPrivateIpsOk() (*[]string, bool) { + if o == nil { + return nil, false + } + + return o.LbPrivateIps, true +} + +// SetLbPrivateIps sets field value +func (o *NetworkLoadBalancerProperties) SetLbPrivateIps(v []string) { + + o.LbPrivateIps = &v + +} + +// HasLbPrivateIps returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerProperties) HasLbPrivateIps() bool { + if o != nil && o.LbPrivateIps != nil { return true } @@ -89,7 +127,7 @@ func (o *NetworkLoadBalancerProperties) HasName() bool { } // GetListenerLan returns the ListenerLan field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerProperties) GetListenerLan() *int32 { if o == nil { return nil @@ -126,38 +164,38 @@ func (o *NetworkLoadBalancerProperties) HasListenerLan() bool { return false } -// GetIps returns the Ips field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *NetworkLoadBalancerProperties) GetIps() *[]string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerProperties) GetName() *string { if o == nil { return nil } - return o.Ips + return o.Name } -// GetIpsOk returns a tuple with the Ips field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerProperties) GetIpsOk() (*[]string, bool) { +func (o *NetworkLoadBalancerProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Ips, true + return o.Name, true } -// SetIps sets field value -func (o *NetworkLoadBalancerProperties) SetIps(v []string) { +// SetName sets field value +func (o *NetworkLoadBalancerProperties) SetName(v string) { - o.Ips = &v + o.Name = &v } -// HasIps returns a boolean if a field has been set. -func (o *NetworkLoadBalancerProperties) HasIps() bool { - if o != nil && o.Ips != nil { +// HasName returns a boolean if a field has been set. +func (o *NetworkLoadBalancerProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -165,7 +203,7 @@ func (o *NetworkLoadBalancerProperties) HasIps() bool { } // GetTargetLan returns the TargetLan field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancerProperties) GetTargetLan() *int32 { if o == nil { return nil @@ -202,61 +240,28 @@ func (o *NetworkLoadBalancerProperties) HasTargetLan() bool { return false } -// GetLbPrivateIps returns the LbPrivateIps field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *NetworkLoadBalancerProperties) GetLbPrivateIps() *[]string { - if o == nil { - return nil +func (o NetworkLoadBalancerProperties) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if o.Ips != nil { + toSerialize["ips"] = o.Ips } - return o.LbPrivateIps - -} - -// GetLbPrivateIpsOk returns a tuple with the LbPrivateIps field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerProperties) GetLbPrivateIpsOk() (*[]string, bool) { - if o == nil { - return nil, false + if o.LbPrivateIps != nil { + toSerialize["lbPrivateIps"] = o.LbPrivateIps } - return o.LbPrivateIps, true -} - -// SetLbPrivateIps sets field value -func (o *NetworkLoadBalancerProperties) SetLbPrivateIps(v []string) { - - o.LbPrivateIps = &v - -} - -// HasLbPrivateIps returns a boolean if a field has been set. 
-func (o *NetworkLoadBalancerProperties) HasLbPrivateIps() bool { - if o != nil && o.LbPrivateIps != nil { - return true + if o.ListenerLan != nil { + toSerialize["listenerLan"] = o.ListenerLan } - return false -} - -func (o NetworkLoadBalancerProperties) MarshalJSON() ([]byte, error) { - toSerialize := map[string]interface{}{} if o.Name != nil { toSerialize["name"] = o.Name } - if o.ListenerLan != nil { - toSerialize["listenerLan"] = o.ListenerLan - } - if o.Ips != nil { - toSerialize["ips"] = o.Ips - } + if o.TargetLan != nil { toSerialize["targetLan"] = o.TargetLan } - if o.LbPrivateIps != nil { - toSerialize["lbPrivateIps"] = o.LbPrivateIps - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_put.go index 05bde508c..ec4deb54b 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_put.go @@ -16,13 +16,13 @@ import ( // NetworkLoadBalancerPut struct for NetworkLoadBalancerPut type NetworkLoadBalancerPut struct { + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *NetworkLoadBalancerProperties `json:"properties"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` - Properties *NetworkLoadBalancerProperties `json:"properties"` } // NewNetworkLoadBalancerPut instantiates a new NetworkLoadBalancerPut object @@ -45,152 +45,152 @@ func NewNetworkLoadBalancerPutWithDefaults() *NetworkLoadBalancerPut { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerPut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerPut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerPut) GetIdOk() (*string, bool) { +func (o *NetworkLoadBalancerPut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *NetworkLoadBalancerPut) SetId(v string) { +// SetHref sets field value +func (o *NetworkLoadBalancerPut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *NetworkLoadBalancerPut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancerPut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NetworkLoadBalancerPut) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerPut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerPut) GetTypeOk() (*Type, bool) { +func (o *NetworkLoadBalancerPut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *NetworkLoadBalancerPut) SetType(v Type) { +// SetId sets field value +func (o *NetworkLoadBalancerPut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *NetworkLoadBalancerPut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *NetworkLoadBalancerPut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancerPut) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerPut) GetProperties() *NetworkLoadBalancerProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerPut) GetHrefOk() (*string, bool) { +func (o *NetworkLoadBalancerPut) GetPropertiesOk() (*NetworkLoadBalancerProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *NetworkLoadBalancerPut) SetHref(v string) { +// SetProperties sets field value +func (o *NetworkLoadBalancerPut) SetProperties(v NetworkLoadBalancerProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *NetworkLoadBalancerPut) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. +func (o *NetworkLoadBalancerPut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NetworkLoadBalancerProperties will be returned -func (o *NetworkLoadBalancerPut) GetProperties() *NetworkLoadBalancerProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancerPut) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancerPut) GetPropertiesOk() (*NetworkLoadBalancerProperties, bool) { +func (o *NetworkLoadBalancerPut) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *NetworkLoadBalancerPut) SetProperties(v NetworkLoadBalancerProperties) { +// SetType sets field value +func (o *NetworkLoadBalancerPut) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *NetworkLoadBalancerPut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *NetworkLoadBalancerPut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *NetworkLoadBalancerPut) HasProperties() bool { func (o NetworkLoadBalancerPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancers.go index 91bbe63e1..b4ccbbc87 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancers.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancers.go @@ -16,19 +16,19 @@ import ( // NetworkLoadBalancers struct for NetworkLoadBalancers type NetworkLoadBalancers struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]NetworkLoadBalancer `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewNetworkLoadBalancers instantiates a new NetworkLoadBalancers object @@ -49,114 +49,114 @@ func NewNetworkLoadBalancersWithDefaults() *NetworkLoadBalancers { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancers) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancers) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancers) GetIdOk() (*string, bool) { +func (o *NetworkLoadBalancers) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *NetworkLoadBalancers) SetId(v string) { +// SetLinks sets field value +func (o *NetworkLoadBalancers) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *NetworkLoadBalancers) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *NetworkLoadBalancers) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NetworkLoadBalancers) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancers) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancers) GetTypeOk() (*Type, bool) { +func (o *NetworkLoadBalancers) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *NetworkLoadBalancers) SetType(v Type) { +// SetHref sets field value +func (o *NetworkLoadBalancers) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *NetworkLoadBalancers) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *NetworkLoadBalancers) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NetworkLoadBalancers) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancers) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancers) GetHrefOk() (*string, bool) { +func (o *NetworkLoadBalancers) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *NetworkLoadBalancers) SetHref(v string) { +// SetId sets field value +func (o *NetworkLoadBalancers) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *NetworkLoadBalancers) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancers) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *NetworkLoadBalancers) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []NetworkLoadBalancer will be returned +// If the value is explicit nil, nil is returned func (o *NetworkLoadBalancers) GetItems() *[]NetworkLoadBalancer { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *NetworkLoadBalancers) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *NetworkLoadBalancers) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancers) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancers) GetOffsetOk() (*float32, bool) { +func (o *NetworkLoadBalancers) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *NetworkLoadBalancers) SetOffset(v float32) { +// SetLimit sets field value +func (o *NetworkLoadBalancers) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *NetworkLoadBalancers) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *NetworkLoadBalancers) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *NetworkLoadBalancers) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancers) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancers) GetLimitOk() (*float32, bool) { +func (o *NetworkLoadBalancers) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *NetworkLoadBalancers) SetLimit(v float32) { +// SetOffset sets field value +func (o *NetworkLoadBalancers) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *NetworkLoadBalancers) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. 
+func (o *NetworkLoadBalancers) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *NetworkLoadBalancers) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NetworkLoadBalancers) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NetworkLoadBalancers) GetLinksOk() (*PaginationLinks, bool) { +func (o *NetworkLoadBalancers) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *NetworkLoadBalancers) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *NetworkLoadBalancers) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *NetworkLoadBalancers) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *NetworkLoadBalancers) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *NetworkLoadBalancers) HasLinks() bool { func (o NetworkLoadBalancers) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic.go index 9d8378c6f..6a9c05537 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic.go @@ -16,15 +16,15 @@ import ( // Nic struct for Nic type Nic struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Entities *NicEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *NicProperties `json:"properties"` - Entities *NicEntities `json:"entities,omitempty"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewNic instantiates a new Nic object @@ -47,114 +47,114 @@ func NewNicWithDefaults() *Nic { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Nic) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *Nic) GetEntities() *NicEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nic) GetIdOk() (*string, bool) { +func (o *Nic) GetEntitiesOk() (*NicEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *Nic) SetId(v string) { +// SetEntities sets field value +func (o *Nic) SetEntities(v NicEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *Nic) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *Nic) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Nic) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Nic) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nic) GetTypeOk() (*Type, bool) { +func (o *Nic) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Nic) SetType(v Type) { +// SetHref sets field value +func (o *Nic) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Nic) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Nic) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Nic) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Nic) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nic) GetHrefOk() (*string, bool) { +func (o *Nic) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Nic) SetHref(v string) { +// SetId sets field value +func (o *Nic) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *Nic) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Nic) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *Nic) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *Nic) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *Nic) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NicProperties will be returned +// If the value is explicit nil, nil is returned func (o *Nic) GetProperties() *NicProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *Nic) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for NicEntities will be returned -func (o *Nic) GetEntities() *NicEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Nic) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nic) GetEntitiesOk() (*NicEntities, bool) { +func (o *Nic) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *Nic) SetEntities(v NicEntities) { +// SetType sets field value +func (o *Nic) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *Nic) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Nic) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *Nic) HasEntities() bool { func (o Nic) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_entities.go index b1a5553a7..ab19ab4ac 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_entities.go @@ -16,8 +16,8 @@ import ( // NicEntities struct for NicEntities type NicEntities struct { - Flowlogs *FlowLogs `json:"flowlogs,omitempty"` Firewallrules *FirewallRules `json:"firewallrules,omitempty"` + Flowlogs *FlowLogs `json:"flowlogs,omitempty"` } // NewNicEntities instantiates a new NicEntities object @@ -38,76 +38,76 @@ func NewNicEntitiesWithDefaults() *NicEntities { return &this } -// GetFlowlogs returns the Flowlogs field value -// If the value is explicit nil, the zero value for FlowLogs will be returned -func (o *NicEntities) GetFlowlogs() *FlowLogs { +// GetFirewallrules returns the Firewallrules field value +// If the value is explicit nil, nil is returned +func (o *NicEntities) GetFirewallrules() *FirewallRules { if o == nil { return nil } - return o.Flowlogs + return o.Firewallrules } -// GetFlowlogsOk returns a tuple with the Flowlogs field value +// GetFirewallrulesOk returns a tuple with the Firewallrules field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicEntities) GetFlowlogsOk() (*FlowLogs, bool) { +func (o *NicEntities) GetFirewallrulesOk() (*FirewallRules, bool) { if o == nil { return nil, false } - return o.Flowlogs, true + return o.Firewallrules, true } -// SetFlowlogs sets field value -func (o *NicEntities) SetFlowlogs(v FlowLogs) { +// SetFirewallrules sets field value +func (o *NicEntities) SetFirewallrules(v FirewallRules) { - o.Flowlogs = &v + o.Firewallrules = &v } -// HasFlowlogs returns a boolean if a field has been set. -func (o *NicEntities) HasFlowlogs() bool { - if o != nil && o.Flowlogs != nil { +// HasFirewallrules returns a boolean if a field has been set. +func (o *NicEntities) HasFirewallrules() bool { + if o != nil && o.Firewallrules != nil { return true } return false } -// GetFirewallrules returns the Firewallrules field value -// If the value is explicit nil, the zero value for FirewallRules will be returned -func (o *NicEntities) GetFirewallrules() *FirewallRules { +// GetFlowlogs returns the Flowlogs field value +// If the value is explicit nil, nil is returned +func (o *NicEntities) GetFlowlogs() *FlowLogs { if o == nil { return nil } - return o.Firewallrules + return o.Flowlogs } -// GetFirewallrulesOk returns a tuple with the Firewallrules field value +// GetFlowlogsOk returns a tuple with the Flowlogs field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicEntities) GetFirewallrulesOk() (*FirewallRules, bool) { +func (o *NicEntities) GetFlowlogsOk() (*FlowLogs, bool) { if o == nil { return nil, false } - return o.Firewallrules, true + return o.Flowlogs, true } -// SetFirewallrules sets field value -func (o *NicEntities) SetFirewallrules(v FirewallRules) { +// SetFlowlogs sets field value +func (o *NicEntities) SetFlowlogs(v FlowLogs) { - o.Firewallrules = &v + o.Flowlogs = &v } -// HasFirewallrules returns a boolean if a field has been set. -func (o *NicEntities) HasFirewallrules() bool { - if o != nil && o.Firewallrules != nil { +// HasFlowlogs returns a boolean if a field has been set. +func (o *NicEntities) HasFlowlogs() bool { + if o != nil && o.Flowlogs != nil { return true } @@ -116,12 +116,14 @@ func (o *NicEntities) HasFirewallrules() bool { func (o NicEntities) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Flowlogs != nil { - toSerialize["flowlogs"] = o.Flowlogs - } if o.Firewallrules != nil { toSerialize["firewallrules"] = o.Firewallrules } + + if o.Flowlogs != nil { + toSerialize["flowlogs"] = o.Flowlogs + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_properties.go index 20d0323ac..cda1f50b0 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_properties.go @@ -16,24 +16,36 @@ import ( // NicProperties struct for NicProperties type NicProperties struct { - // The name of the resource. - Name *string `json:"name,omitempty"` - // The MAC address of the NIC. - Mac *string `json:"mac,omitempty"` - // Collection of IP addresses, assigned to the NIC. Explicitly assigned public IPs need to come from reserved IP blocks. Passing value null or empty array will assign an IP address automatically. - Ips *[]string `json:"ips,omitempty"` + // The Logical Unit Number (LUN) of the storage volume. Null if this NIC was created using Cloud API and no DCD changes were performed on the Datacenter. + DeviceNumber *int32 `json:"deviceNumber,omitempty"` // Indicates if the NIC will reserve an IP using DHCP. Dhcp *bool `json:"dhcp,omitempty"` - // The LAN ID the NIC will be on. If the LAN ID does not exist, it will be implicitly created. - Lan *int32 `json:"lan"` + // [The IPv6 feature is in beta phase and not ready for production usage.] Indicates if the NIC will receive an IPv6 using DHCP. It can be set to 'true' or 'false' only if this NIC is connected to an IPv6 enabled LAN. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilbool` can be used, or the setter `SetDhcpv6Nil` + Dhcpv6 *bool `json:"dhcpv6,omitempty"` // Activate or deactivate the firewall. By default, an active firewall without any defined rules will block all incoming network traffic except for the firewall rules that explicitly allows certain protocols, IP addresses and ports. FirewallActive *bool `json:"firewallActive,omitempty"` // The type of firewall rules that will be allowed on the NIC. If not specified, the default INGRESS value is used. FirewallType *string `json:"firewallType,omitempty"` - // The Logical Unit Number (LUN) of the storage volume. Null if this NIC was created using Cloud API and no DCD changes were performed on the Datacenter. 
- DeviceNumber *int32 `json:"deviceNumber,omitempty"` + // Collection of IP addresses, assigned to the NIC. Explicitly assigned public IPs need to come from reserved IP blocks. Passing value null or empty array will assign an IP address automatically. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nil[]string` can be used, or the setter `SetIpsNil` + Ips *[]string `json:"ips,omitempty"` + // [The IPv6 feature is in beta phase and not ready for production usage.] If this NIC is connected to an IPv6 enabled LAN then this property contains the /80 IPv6 CIDR block of the NIC. If you leave this property 'null' when adding a NIC to an IPv6-enabled LAN, then an IPv6 CIDR block will automatically be assigned to the NIC, but you can also specify an /80 IPv6 CIDR block for the NIC on your own, which must be inside the /64 IPv6 CIDR block of the LAN and unique. This value can only be set, if the LAN already has an IPv6 CIDR block assigned. An IPv6-enabled LAN is limited to a maximum of 65,536 NICs. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetIpv6CidrBlockNil` + Ipv6CidrBlock *string `json:"ipv6CidrBlock,omitempty"` + // [The IPv6 feature is in beta phase and not ready for production usage.] If this NIC is connected to an IPv6 enabled LAN then this property contains the IPv6 IP addresses of the NIC. The maximum number of IPv6 IP addresses per NIC is 50, if you need more, contact support. If you leave this property 'null' when adding a NIC, when changing the NIC's IPv6 CIDR block, when changing the LAN's IPv6 CIDR block or when moving the NIC to a different IPv6 enabled LAN, then we will automatically assign the same number of IPv6 addresses which you had before from the NICs new CIDR block. If you leave this property 'null' while not changing the CIDR block, the IPv6 IP addresses won't be changed either. You can also provide your own self choosen IPv6 addresses, which then must be inside the IPv6 CIDR block of this NIC. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nil[]string` can be used, or the setter `SetIpv6IpsNil` + Ipv6Ips *[]string `json:"ipv6Ips,omitempty"` + // The LAN ID the NIC will be on. If the LAN ID does not exist, it will be implicitly created. + Lan *int32 `json:"lan"` + // The MAC address of the NIC. + Mac *string `json:"mac,omitempty"` + // The name of the resource. + Name *string `json:"name,omitempty"` // The PCI slot number for the NIC. PciSlot *int32 `json:"pciSlot,omitempty"` + // The vnet ID that belongs to this NIC; Requires system privileges + Vnet *string `json:"vnet,omitempty"` } // NewNicProperties instantiates a new NicProperties object @@ -60,76 +72,195 @@ func NewNicPropertiesWithDefaults() *NicProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NicProperties) GetName() *string { +// GetDeviceNumber returns the DeviceNumber field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetDeviceNumber() *int32 { if o == nil { return nil } - return o.Name + return o.DeviceNumber } -// GetNameOk returns a tuple with the Name field value +// GetDeviceNumberOk returns a tuple with the DeviceNumber field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicProperties) GetNameOk() (*string, bool) { +func (o *NicProperties) GetDeviceNumberOk() (*int32, bool) { if o == nil { return nil, false } - return o.Name, true + return o.DeviceNumber, true } -// SetName sets field value -func (o *NicProperties) SetName(v string) { +// SetDeviceNumber sets field value +func (o *NicProperties) SetDeviceNumber(v int32) { - o.Name = &v + o.DeviceNumber = &v } -// HasName returns a boolean if a field has been set. -func (o *NicProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasDeviceNumber returns a boolean if a field has been set. +func (o *NicProperties) HasDeviceNumber() bool { + if o != nil && o.DeviceNumber != nil { return true } return false } -// GetMac returns the Mac field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NicProperties) GetMac() *string { +// GetDhcp returns the Dhcp field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetDhcp() *bool { if o == nil { return nil } - return o.Mac + return o.Dhcp } -// GetMacOk returns a tuple with the Mac field value +// GetDhcpOk returns a tuple with the Dhcp field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicProperties) GetMacOk() (*string, bool) { +func (o *NicProperties) GetDhcpOk() (*bool, bool) { if o == nil { return nil, false } - return o.Mac, true + return o.Dhcp, true } -// SetMac sets field value -func (o *NicProperties) SetMac(v string) { +// SetDhcp sets field value +func (o *NicProperties) SetDhcp(v bool) { - o.Mac = &v + o.Dhcp = &v } -// HasMac returns a boolean if a field has been set. -func (o *NicProperties) HasMac() bool { - if o != nil && o.Mac != nil { +// HasDhcp returns a boolean if a field has been set. +func (o *NicProperties) HasDhcp() bool { + if o != nil && o.Dhcp != nil { + return true + } + + return false +} + +// GetDhcpv6 returns the Dhcpv6 field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetDhcpv6() *bool { + if o == nil { + return nil + } + + return o.Dhcpv6 + +} + +// GetDhcpv6Ok returns a tuple with the Dhcpv6 field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *NicProperties) GetDhcpv6Ok() (*bool, bool) { + if o == nil { + return nil, false + } + + return o.Dhcpv6, true +} + +// SetDhcpv6 sets field value +func (o *NicProperties) SetDhcpv6(v bool) { + + o.Dhcpv6 = &v + +} + +// sets Dhcpv6 to the explicit address that will be encoded as nil when marshaled +func (o *NicProperties) SetDhcpv6Nil() { + o.Dhcpv6 = &Nilbool +} + +// HasDhcpv6 returns a boolean if a field has been set. +func (o *NicProperties) HasDhcpv6() bool { + if o != nil && o.Dhcpv6 != nil { + return true + } + + return false +} + +// GetFirewallActive returns the FirewallActive field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetFirewallActive() *bool { + if o == nil { + return nil + } + + return o.FirewallActive + +} + +// GetFirewallActiveOk returns a tuple with the FirewallActive field value +// and a boolean to check if the value has been set. 
+// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *NicProperties) GetFirewallActiveOk() (*bool, bool) { + if o == nil { + return nil, false + } + + return o.FirewallActive, true +} + +// SetFirewallActive sets field value +func (o *NicProperties) SetFirewallActive(v bool) { + + o.FirewallActive = &v + +} + +// HasFirewallActive returns a boolean if a field has been set. +func (o *NicProperties) HasFirewallActive() bool { + if o != nil && o.FirewallActive != nil { + return true + } + + return false +} + +// GetFirewallType returns the FirewallType field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetFirewallType() *string { + if o == nil { + return nil + } + + return o.FirewallType + +} + +// GetFirewallTypeOk returns a tuple with the FirewallType field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *NicProperties) GetFirewallTypeOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.FirewallType, true +} + +// SetFirewallType sets field value +func (o *NicProperties) SetFirewallType(v string) { + + o.FirewallType = &v + +} + +// HasFirewallType returns a boolean if a field has been set. +func (o *NicProperties) HasFirewallType() bool { + if o != nil && o.FirewallType != nil { return true } @@ -137,7 +268,7 @@ func (o *NicProperties) HasMac() bool { } // GetIps returns the Ips field value -// If the value is explicit nil, the zero value for []string will be returned +// If the value is explicit nil, nil is returned func (o *NicProperties) GetIps() *[]string { if o == nil { return nil @@ -174,190 +305,195 @@ func (o *NicProperties) HasIps() bool { return false } -// GetDhcp returns the Dhcp field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *NicProperties) GetDhcp() *bool { +// GetIpv6CidrBlock returns the Ipv6CidrBlock field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetIpv6CidrBlock() *string { if o == nil { return nil } - return o.Dhcp + return o.Ipv6CidrBlock } -// GetDhcpOk returns a tuple with the Dhcp field value +// GetIpv6CidrBlockOk returns a tuple with the Ipv6CidrBlock field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicProperties) GetDhcpOk() (*bool, bool) { +func (o *NicProperties) GetIpv6CidrBlockOk() (*string, bool) { if o == nil { return nil, false } - return o.Dhcp, true + return o.Ipv6CidrBlock, true } -// SetDhcp sets field value -func (o *NicProperties) SetDhcp(v bool) { +// SetIpv6CidrBlock sets field value +func (o *NicProperties) SetIpv6CidrBlock(v string) { - o.Dhcp = &v + o.Ipv6CidrBlock = &v } -// HasDhcp returns a boolean if a field has been set. -func (o *NicProperties) HasDhcp() bool { - if o != nil && o.Dhcp != nil { +// sets Ipv6CidrBlock to the explicit address that will be encoded as nil when marshaled +func (o *NicProperties) SetIpv6CidrBlockNil() { + o.Ipv6CidrBlock = &Nilstring +} + +// HasIpv6CidrBlock returns a boolean if a field has been set. 
+func (o *NicProperties) HasIpv6CidrBlock() bool { + if o != nil && o.Ipv6CidrBlock != nil { return true } return false } -// GetLan returns the Lan field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *NicProperties) GetLan() *int32 { +// GetIpv6Ips returns the Ipv6Ips field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetIpv6Ips() *[]string { if o == nil { return nil } - return o.Lan + return o.Ipv6Ips } -// GetLanOk returns a tuple with the Lan field value +// GetIpv6IpsOk returns a tuple with the Ipv6Ips field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicProperties) GetLanOk() (*int32, bool) { +func (o *NicProperties) GetIpv6IpsOk() (*[]string, bool) { if o == nil { return nil, false } - return o.Lan, true + return o.Ipv6Ips, true } -// SetLan sets field value -func (o *NicProperties) SetLan(v int32) { +// SetIpv6Ips sets field value +func (o *NicProperties) SetIpv6Ips(v []string) { - o.Lan = &v + o.Ipv6Ips = &v } -// HasLan returns a boolean if a field has been set. -func (o *NicProperties) HasLan() bool { - if o != nil && o.Lan != nil { +// HasIpv6Ips returns a boolean if a field has been set. +func (o *NicProperties) HasIpv6Ips() bool { + if o != nil && o.Ipv6Ips != nil { return true } return false } -// GetFirewallActive returns the FirewallActive field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *NicProperties) GetFirewallActive() *bool { +// GetLan returns the Lan field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetLan() *int32 { if o == nil { return nil } - return o.FirewallActive + return o.Lan } -// GetFirewallActiveOk returns a tuple with the FirewallActive field value +// GetLanOk returns a tuple with the Lan field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicProperties) GetFirewallActiveOk() (*bool, bool) { +func (o *NicProperties) GetLanOk() (*int32, bool) { if o == nil { return nil, false } - return o.FirewallActive, true + return o.Lan, true } -// SetFirewallActive sets field value -func (o *NicProperties) SetFirewallActive(v bool) { +// SetLan sets field value +func (o *NicProperties) SetLan(v int32) { - o.FirewallActive = &v + o.Lan = &v } -// HasFirewallActive returns a boolean if a field has been set. -func (o *NicProperties) HasFirewallActive() bool { - if o != nil && o.FirewallActive != nil { +// HasLan returns a boolean if a field has been set. +func (o *NicProperties) HasLan() bool { + if o != nil && o.Lan != nil { return true } return false } -// GetFirewallType returns the FirewallType field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NicProperties) GetFirewallType() *string { +// GetMac returns the Mac field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetMac() *string { if o == nil { return nil } - return o.FirewallType + return o.Mac } -// GetFirewallTypeOk returns a tuple with the FirewallType field value +// GetMacOk returns a tuple with the Mac field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicProperties) GetFirewallTypeOk() (*string, bool) { +func (o *NicProperties) GetMacOk() (*string, bool) { if o == nil { return nil, false } - return o.FirewallType, true + return o.Mac, true } -// SetFirewallType sets field value -func (o *NicProperties) SetFirewallType(v string) { +// SetMac sets field value +func (o *NicProperties) SetMac(v string) { - o.FirewallType = &v + o.Mac = &v } -// HasFirewallType returns a boolean if a field has been set. -func (o *NicProperties) HasFirewallType() bool { - if o != nil && o.FirewallType != nil { +// HasMac returns a boolean if a field has been set. +func (o *NicProperties) HasMac() bool { + if o != nil && o.Mac != nil { return true } return false } -// GetDeviceNumber returns the DeviceNumber field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *NicProperties) GetDeviceNumber() *int32 { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetName() *string { if o == nil { return nil } - return o.DeviceNumber + return o.Name } -// GetDeviceNumberOk returns a tuple with the DeviceNumber field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicProperties) GetDeviceNumberOk() (*int32, bool) { +func (o *NicProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.DeviceNumber, true + return o.Name, true } -// SetDeviceNumber sets field value -func (o *NicProperties) SetDeviceNumber(v int32) { +// SetName sets field value +func (o *NicProperties) SetName(v string) { - o.DeviceNumber = &v + o.Name = &v } -// HasDeviceNumber returns a boolean if a field has been set. -func (o *NicProperties) HasDeviceNumber() bool { - if o != nil && o.DeviceNumber != nil { +// HasName returns a boolean if a field has been set. +func (o *NicProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -365,7 +501,7 @@ func (o *NicProperties) HasDeviceNumber() bool { } // GetPciSlot returns the PciSlot field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *NicProperties) GetPciSlot() *int32 { if o == nil { return nil @@ -402,33 +538,100 @@ func (o *NicProperties) HasPciSlot() bool { return false } +// GetVnet returns the Vnet field value +// If the value is explicit nil, nil is returned +func (o *NicProperties) GetVnet() *string { + if o == nil { + return nil + } + + return o.Vnet + +} + +// GetVnetOk returns a tuple with the Vnet field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *NicProperties) GetVnetOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Vnet, true +} + +// SetVnet sets field value +func (o *NicProperties) SetVnet(v string) { + + o.Vnet = &v + +} + +// HasVnet returns a boolean if a field has been set. 
+func (o *NicProperties) HasVnet() bool { + if o != nil && o.Vnet != nil { + return true + } + + return false +} + func (o NicProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } - if o.Mac != nil { - toSerialize["mac"] = o.Mac + if o.DeviceNumber != nil { + toSerialize["deviceNumber"] = o.DeviceNumber } - toSerialize["ips"] = o.Ips + if o.Dhcp != nil { toSerialize["dhcp"] = o.Dhcp } - if o.Lan != nil { - toSerialize["lan"] = o.Lan + + if o.Dhcpv6 == &Nilbool { + toSerialize["dhcpv6"] = nil + } else if o.Dhcpv6 != nil { + toSerialize["dhcpv6"] = o.Dhcpv6 } if o.FirewallActive != nil { toSerialize["firewallActive"] = o.FirewallActive } + if o.FirewallType != nil { toSerialize["firewallType"] = o.FirewallType } - if o.DeviceNumber != nil { - toSerialize["deviceNumber"] = o.DeviceNumber + + if o.Ips != nil { + toSerialize["ips"] = o.Ips + } + + if o.Ipv6CidrBlock == &Nilstring { + toSerialize["ipv6CidrBlock"] = nil + } else if o.Ipv6CidrBlock != nil { + toSerialize["ipv6CidrBlock"] = o.Ipv6CidrBlock + } + + if o.Ipv6Ips != nil { + toSerialize["ipv6Ips"] = o.Ipv6Ips + } + if o.Lan != nil { + toSerialize["lan"] = o.Lan + } + + if o.Mac != nil { + toSerialize["mac"] = o.Mac + } + + if o.Name != nil { + toSerialize["name"] = o.Name } + if o.PciSlot != nil { toSerialize["pciSlot"] = o.PciSlot } + + if o.Vnet != nil { + toSerialize["vnet"] = o.Vnet + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_put.go index e37c81075..0029a6eab 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_put.go @@ -16,13 +16,13 @@ import ( // NicPut struct for NicPut type NicPut struct { + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *NicProperties `json:"properties"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` - Properties *NicProperties `json:"properties"` } // NewNicPut instantiates a new NicPut object @@ -45,152 +45,152 @@ func NewNicPutWithDefaults() *NicPut { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NicPut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *NicPut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicPut) GetIdOk() (*string, bool) { +func (o *NicPut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *NicPut) SetId(v string) { +// SetHref sets field value +func (o *NicPut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *NicPut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *NicPut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *NicPut) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *NicPut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicPut) GetTypeOk() (*Type, bool) { +func (o *NicPut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *NicPut) SetType(v Type) { +// SetId sets field value +func (o *NicPut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *NicPut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *NicPut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NicPut) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *NicPut) GetProperties() *NicProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicPut) GetHrefOk() (*string, bool) { +func (o *NicPut) GetPropertiesOk() (*NicProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *NicPut) SetHref(v string) { +// SetProperties sets field value +func (o *NicPut) SetProperties(v NicProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *NicPut) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. +func (o *NicPut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for NicProperties will be returned -func (o *NicPut) GetProperties() *NicProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *NicPut) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicPut) GetPropertiesOk() (*NicProperties, bool) { +func (o *NicPut) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *NicPut) SetProperties(v NicProperties) { +// SetType sets field value +func (o *NicPut) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *NicPut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *NicPut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *NicPut) HasProperties() bool { func (o NicPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nics.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nics.go index 80ff9957f..6fe1220a4 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nics.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nics.go @@ -16,19 +16,19 @@ import ( // Nics struct for Nics type Nics struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Nic `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewNics instantiates a new Nics object @@ -49,114 +49,114 @@ func NewNicsWithDefaults() *Nics { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Nics) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *Nics) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nics) GetIdOk() (*string, bool) { +func (o *Nics) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *Nics) SetId(v string) { +// SetLinks sets field value +func (o *Nics) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. 
-func (o *Nics) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *Nics) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Nics) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Nics) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nics) GetTypeOk() (*Type, bool) { +func (o *Nics) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Nics) SetType(v Type) { +// SetHref sets field value +func (o *Nics) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Nics) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Nics) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Nics) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Nics) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nics) GetHrefOk() (*string, bool) { +func (o *Nics) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Nics) SetHref(v string) { +// SetId sets field value +func (o *Nics) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Nics) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Nics) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *Nics) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Nic will be returned +// If the value is explicit nil, nil is returned func (o *Nics) GetItems() *[]Nic { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *Nics) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Nics) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *Nics) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nics) GetOffsetOk() (*float32, bool) { +func (o *Nics) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *Nics) SetOffset(v float32) { +// SetLimit sets field value +func (o *Nics) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *Nics) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *Nics) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Nics) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *Nics) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nics) GetLimitOk() (*float32, bool) { +func (o *Nics) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *Nics) SetLimit(v float32) { +// SetOffset sets field value +func (o *Nics) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *Nics) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *Nics) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *Nics) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Nics) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Nics) GetLinksOk() (*PaginationLinks, bool) { +func (o *Nics) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *Nics) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *Nics) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *Nics) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Nics) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *Nics) HasLinks() bool { func (o Nics) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_no_state_meta_data.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_no_state_meta_data.go index 4d1488a1d..bf6d643dd 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_no_state_meta_data.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_no_state_meta_data.go @@ -17,20 +17,20 @@ import ( // NoStateMetaData struct for NoStateMetaData type NoStateMetaData struct { - // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. - Etag *string `json:"etag,omitempty"` - // The time when the resource was created. - CreatedDate *IonosTime // The user who has created the resource. CreatedBy *string `json:"createdBy,omitempty"` // The unique ID of the user who created the resource. CreatedByUserId *string `json:"createdByUserId,omitempty"` - // The last time the resource was modified. - LastModifiedDate *IonosTime + // The time when the resource was created. + CreatedDate *IonosTime + // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. + Etag *string `json:"etag,omitempty"` // The user who last modified the resource. LastModifiedBy *string `json:"lastModifiedBy,omitempty"` // The unique ID of the user who last modified the resource. LastModifiedByUserId *string `json:"lastModifiedByUserId,omitempty"` + // The last time the resource was modified. + LastModifiedDate *IonosTime } // NewNoStateMetaData instantiates a new NoStateMetaData object @@ -51,91 +51,8 @@ func NewNoStateMetaDataWithDefaults() *NoStateMetaData { return &this } -// GetEtag returns the Etag field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NoStateMetaData) GetEtag() *string { - if o == nil { - return nil - } - - return o.Etag - -} - -// GetEtagOk returns a tuple with the Etag field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NoStateMetaData) GetEtagOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Etag, true -} - -// SetEtag sets field value -func (o *NoStateMetaData) SetEtag(v string) { - - o.Etag = &v - -} - -// HasEtag returns a boolean if a field has been set. 
-func (o *NoStateMetaData) HasEtag() bool { - if o != nil && o.Etag != nil { - return true - } - - return false -} - -// GetCreatedDate returns the CreatedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned -func (o *NoStateMetaData) GetCreatedDate() *time.Time { - if o == nil { - return nil - } - - if o.CreatedDate == nil { - return nil - } - return &o.CreatedDate.Time - -} - -// GetCreatedDateOk returns a tuple with the CreatedDate field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NoStateMetaData) GetCreatedDateOk() (*time.Time, bool) { - if o == nil { - return nil, false - } - - if o.CreatedDate == nil { - return nil, false - } - return &o.CreatedDate.Time, true - -} - -// SetCreatedDate sets field value -func (o *NoStateMetaData) SetCreatedDate(v time.Time) { - - o.CreatedDate = &IonosTime{v} - -} - -// HasCreatedDate returns a boolean if a field has been set. -func (o *NoStateMetaData) HasCreatedDate() bool { - if o != nil && o.CreatedDate != nil { - return true - } - - return false -} - // GetCreatedBy returns the CreatedBy field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NoStateMetaData) GetCreatedBy() *string { if o == nil { return nil @@ -173,7 +90,7 @@ func (o *NoStateMetaData) HasCreatedBy() bool { } // GetCreatedByUserId returns the CreatedByUserId field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NoStateMetaData) GetCreatedByUserId() *string { if o == nil { return nil @@ -210,45 +127,83 @@ func (o *NoStateMetaData) HasCreatedByUserId() bool { return false } -// GetLastModifiedDate returns the LastModifiedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned -func (o *NoStateMetaData) GetLastModifiedDate() *time.Time { +// GetCreatedDate returns the CreatedDate field value +// If the value is explicit nil, nil is returned +func (o *NoStateMetaData) GetCreatedDate() *time.Time { if o == nil { return nil } - if o.LastModifiedDate == nil { + if o.CreatedDate == nil { return nil } - return &o.LastModifiedDate.Time + return &o.CreatedDate.Time } -// GetLastModifiedDateOk returns a tuple with the LastModifiedDate field value +// GetCreatedDateOk returns a tuple with the CreatedDate field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NoStateMetaData) GetLastModifiedDateOk() (*time.Time, bool) { +func (o *NoStateMetaData) GetCreatedDateOk() (*time.Time, bool) { if o == nil { return nil, false } - if o.LastModifiedDate == nil { + if o.CreatedDate == nil { return nil, false } - return &o.LastModifiedDate.Time, true + return &o.CreatedDate.Time, true } -// SetLastModifiedDate sets field value -func (o *NoStateMetaData) SetLastModifiedDate(v time.Time) { +// SetCreatedDate sets field value +func (o *NoStateMetaData) SetCreatedDate(v time.Time) { - o.LastModifiedDate = &IonosTime{v} + o.CreatedDate = &IonosTime{v} } -// HasLastModifiedDate returns a boolean if a field has been set. -func (o *NoStateMetaData) HasLastModifiedDate() bool { - if o != nil && o.LastModifiedDate != nil { +// HasCreatedDate returns a boolean if a field has been set. 
+func (o *NoStateMetaData) HasCreatedDate() bool { + if o != nil && o.CreatedDate != nil { + return true + } + + return false +} + +// GetEtag returns the Etag field value +// If the value is explicit nil, nil is returned +func (o *NoStateMetaData) GetEtag() *string { + if o == nil { + return nil + } + + return o.Etag + +} + +// GetEtagOk returns a tuple with the Etag field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *NoStateMetaData) GetEtagOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.Etag, true +} + +// SetEtag sets field value +func (o *NoStateMetaData) SetEtag(v string) { + + o.Etag = &v + +} + +// HasEtag returns a boolean if a field has been set. +func (o *NoStateMetaData) HasEtag() bool { + if o != nil && o.Etag != nil { return true } @@ -256,7 +211,7 @@ func (o *NoStateMetaData) HasLastModifiedDate() bool { } // GetLastModifiedBy returns the LastModifiedBy field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NoStateMetaData) GetLastModifiedBy() *string { if o == nil { return nil @@ -294,7 +249,7 @@ func (o *NoStateMetaData) HasLastModifiedBy() bool { } // GetLastModifiedByUserId returns the LastModifiedByUserId field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *NoStateMetaData) GetLastModifiedByUserId() *string { if o == nil { return nil @@ -331,29 +286,81 @@ func (o *NoStateMetaData) HasLastModifiedByUserId() bool { return false } -func (o NoStateMetaData) MarshalJSON() ([]byte, error) { - toSerialize := map[string]interface{}{} - if o.Etag != nil { - toSerialize["etag"] = o.Etag +// GetLastModifiedDate returns the LastModifiedDate field value +// If the value is explicit nil, nil is returned +func (o *NoStateMetaData) GetLastModifiedDate() *time.Time { + if o == nil { + return nil } - if o.CreatedDate != nil { - toSerialize["createdDate"] = o.CreatedDate + + if o.LastModifiedDate == nil { + return nil + } + return &o.LastModifiedDate.Time + +} + +// GetLastModifiedDateOk returns a tuple with the LastModifiedDate field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *NoStateMetaData) GetLastModifiedDateOk() (*time.Time, bool) { + if o == nil { + return nil, false + } + + if o.LastModifiedDate == nil { + return nil, false + } + return &o.LastModifiedDate.Time, true + +} + +// SetLastModifiedDate sets field value +func (o *NoStateMetaData) SetLastModifiedDate(v time.Time) { + + o.LastModifiedDate = &IonosTime{v} + +} + +// HasLastModifiedDate returns a boolean if a field has been set. 
+func (o *NoStateMetaData) HasLastModifiedDate() bool { + if o != nil && o.LastModifiedDate != nil { + return true } + + return false +} + +func (o NoStateMetaData) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} if o.CreatedBy != nil { toSerialize["createdBy"] = o.CreatedBy } + if o.CreatedByUserId != nil { toSerialize["createdByUserId"] = o.CreatedByUserId } - if o.LastModifiedDate != nil { - toSerialize["lastModifiedDate"] = o.LastModifiedDate + + if o.CreatedDate != nil { + toSerialize["createdDate"] = o.CreatedDate + } + + if o.Etag != nil { + toSerialize["etag"] = o.Etag } + if o.LastModifiedBy != nil { toSerialize["lastModifiedBy"] = o.LastModifiedBy } + if o.LastModifiedByUserId != nil { toSerialize["lastModifiedByUserId"] = o.LastModifiedByUserId } + + if o.LastModifiedDate != nil { + toSerialize["lastModifiedDate"] = o.LastModifiedDate + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_pagination_links.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_pagination_links.go index fa3c76dee..5b8bef304 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_pagination_links.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_pagination_links.go @@ -16,12 +16,12 @@ import ( // PaginationLinks struct for PaginationLinks type PaginationLinks struct { + // URL (with offset and limit parameters) of the next page; only present if offset + limit is less than the total number of elements. + Next *string `json:"next,omitempty"` // URL (with offset and limit parameters) of the previous page; only present if offset is greater than 0. Prev *string `json:"prev,omitempty"` // URL (with offset and limit parameters) of the current page. Self *string `json:"self,omitempty"` - // URL (with offset and limit parameters) of the next page; only present if offset + limit is less than the total number of elements. - Next *string `json:"next,omitempty"` } // NewPaginationLinks instantiates a new PaginationLinks object @@ -42,114 +42,114 @@ func NewPaginationLinksWithDefaults() *PaginationLinks { return &this } -// GetPrev returns the Prev field value -// If the value is explicit nil, the zero value for string will be returned -func (o *PaginationLinks) GetPrev() *string { +// GetNext returns the Next field value +// If the value is explicit nil, nil is returned +func (o *PaginationLinks) GetNext() *string { if o == nil { return nil } - return o.Prev + return o.Next } -// GetPrevOk returns a tuple with the Prev field value +// GetNextOk returns a tuple with the Next field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PaginationLinks) GetPrevOk() (*string, bool) { +func (o *PaginationLinks) GetNextOk() (*string, bool) { if o == nil { return nil, false } - return o.Prev, true + return o.Next, true } -// SetPrev sets field value -func (o *PaginationLinks) SetPrev(v string) { +// SetNext sets field value +func (o *PaginationLinks) SetNext(v string) { - o.Prev = &v + o.Next = &v } -// HasPrev returns a boolean if a field has been set. -func (o *PaginationLinks) HasPrev() bool { - if o != nil && o.Prev != nil { +// HasNext returns a boolean if a field has been set. 
+func (o *PaginationLinks) HasNext() bool { + if o != nil && o.Next != nil { return true } return false } -// GetSelf returns the Self field value -// If the value is explicit nil, the zero value for string will be returned -func (o *PaginationLinks) GetSelf() *string { +// GetPrev returns the Prev field value +// If the value is explicit nil, nil is returned +func (o *PaginationLinks) GetPrev() *string { if o == nil { return nil } - return o.Self + return o.Prev } -// GetSelfOk returns a tuple with the Self field value +// GetPrevOk returns a tuple with the Prev field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PaginationLinks) GetSelfOk() (*string, bool) { +func (o *PaginationLinks) GetPrevOk() (*string, bool) { if o == nil { return nil, false } - return o.Self, true + return o.Prev, true } -// SetSelf sets field value -func (o *PaginationLinks) SetSelf(v string) { +// SetPrev sets field value +func (o *PaginationLinks) SetPrev(v string) { - o.Self = &v + o.Prev = &v } -// HasSelf returns a boolean if a field has been set. -func (o *PaginationLinks) HasSelf() bool { - if o != nil && o.Self != nil { +// HasPrev returns a boolean if a field has been set. +func (o *PaginationLinks) HasPrev() bool { + if o != nil && o.Prev != nil { return true } return false } -// GetNext returns the Next field value -// If the value is explicit nil, the zero value for string will be returned -func (o *PaginationLinks) GetNext() *string { +// GetSelf returns the Self field value +// If the value is explicit nil, nil is returned +func (o *PaginationLinks) GetSelf() *string { if o == nil { return nil } - return o.Next + return o.Self } -// GetNextOk returns a tuple with the Next field value +// GetSelfOk returns a tuple with the Self field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PaginationLinks) GetNextOk() (*string, bool) { +func (o *PaginationLinks) GetSelfOk() (*string, bool) { if o == nil { return nil, false } - return o.Next, true + return o.Self, true } -// SetNext sets field value -func (o *PaginationLinks) SetNext(v string) { +// SetSelf sets field value +func (o *PaginationLinks) SetSelf(v string) { - o.Next = &v + o.Self = &v } -// HasNext returns a boolean if a field has been set. -func (o *PaginationLinks) HasNext() bool { - if o != nil && o.Next != nil { +// HasSelf returns a boolean if a field has been set. 
+func (o *PaginationLinks) HasSelf() bool { + if o != nil && o.Self != nil { return true } @@ -158,15 +158,18 @@ func (o *PaginationLinks) HasNext() bool { func (o PaginationLinks) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + if o.Next != nil { + toSerialize["next"] = o.Next + } + if o.Prev != nil { toSerialize["prev"] = o.Prev } + if o.Self != nil { toSerialize["self"] = o.Self } - if o.Next != nil { - toSerialize["next"] = o.Next - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_peer.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_peer.go index db7c562be..582ff98a9 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_peer.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_peer.go @@ -16,11 +16,11 @@ import ( // Peer struct for Peer type Peer struct { - Id *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` DatacenterId *string `json:"datacenterId,omitempty"` DatacenterName *string `json:"datacenterName,omitempty"` + Id *string `json:"id,omitempty"` Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` } // NewPeer instantiates a new Peer object @@ -41,190 +41,190 @@ func NewPeerWithDefaults() *Peer { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Peer) GetId() *string { +// GetDatacenterId returns the DatacenterId field value +// If the value is explicit nil, nil is returned +func (o *Peer) GetDatacenterId() *string { if o == nil { return nil } - return o.Id + return o.DatacenterId } -// GetIdOk returns a tuple with the Id field value +// GetDatacenterIdOk returns a tuple with the DatacenterId field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Peer) GetIdOk() (*string, bool) { +func (o *Peer) GetDatacenterIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.DatacenterId, true } -// SetId sets field value -func (o *Peer) SetId(v string) { +// SetDatacenterId sets field value +func (o *Peer) SetDatacenterId(v string) { - o.Id = &v + o.DatacenterId = &v } -// HasId returns a boolean if a field has been set. -func (o *Peer) HasId() bool { - if o != nil && o.Id != nil { +// HasDatacenterId returns a boolean if a field has been set. +func (o *Peer) HasDatacenterId() bool { + if o != nil && o.DatacenterId != nil { return true } return false } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Peer) GetName() *string { +// GetDatacenterName returns the DatacenterName field value +// If the value is explicit nil, nil is returned +func (o *Peer) GetDatacenterName() *string { if o == nil { return nil } - return o.Name + return o.DatacenterName } -// GetNameOk returns a tuple with the Name field value +// GetDatacenterNameOk returns a tuple with the DatacenterName field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Peer) GetNameOk() (*string, bool) { +func (o *Peer) GetDatacenterNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.DatacenterName, true } -// SetName sets field value -func (o *Peer) SetName(v string) { +// SetDatacenterName sets field value +func (o *Peer) SetDatacenterName(v string) { - o.Name = &v + o.DatacenterName = &v } -// HasName returns a boolean if a field has been set. -func (o *Peer) HasName() bool { - if o != nil && o.Name != nil { +// HasDatacenterName returns a boolean if a field has been set. +func (o *Peer) HasDatacenterName() bool { + if o != nil && o.DatacenterName != nil { return true } return false } -// GetDatacenterId returns the DatacenterId field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Peer) GetDatacenterId() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Peer) GetId() *string { if o == nil { return nil } - return o.DatacenterId + return o.Id } -// GetDatacenterIdOk returns a tuple with the DatacenterId field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Peer) GetDatacenterIdOk() (*string, bool) { +func (o *Peer) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.DatacenterId, true + return o.Id, true } -// SetDatacenterId sets field value -func (o *Peer) SetDatacenterId(v string) { +// SetId sets field value +func (o *Peer) SetId(v string) { - o.DatacenterId = &v + o.Id = &v } -// HasDatacenterId returns a boolean if a field has been set. -func (o *Peer) HasDatacenterId() bool { - if o != nil && o.DatacenterId != nil { +// HasId returns a boolean if a field has been set. +func (o *Peer) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetDatacenterName returns the DatacenterName field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Peer) GetDatacenterName() *string { +// GetLocation returns the Location field value +// If the value is explicit nil, nil is returned +func (o *Peer) GetLocation() *string { if o == nil { return nil } - return o.DatacenterName + return o.Location } -// GetDatacenterNameOk returns a tuple with the DatacenterName field value +// GetLocationOk returns a tuple with the Location field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Peer) GetDatacenterNameOk() (*string, bool) { +func (o *Peer) GetLocationOk() (*string, bool) { if o == nil { return nil, false } - return o.DatacenterName, true + return o.Location, true } -// SetDatacenterName sets field value -func (o *Peer) SetDatacenterName(v string) { +// SetLocation sets field value +func (o *Peer) SetLocation(v string) { - o.DatacenterName = &v + o.Location = &v } -// HasDatacenterName returns a boolean if a field has been set. -func (o *Peer) HasDatacenterName() bool { - if o != nil && o.DatacenterName != nil { +// HasLocation returns a boolean if a field has been set. 
+func (o *Peer) HasLocation() bool { + if o != nil && o.Location != nil { return true } return false } -// GetLocation returns the Location field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Peer) GetLocation() *string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *Peer) GetName() *string { if o == nil { return nil } - return o.Location + return o.Name } -// GetLocationOk returns a tuple with the Location field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Peer) GetLocationOk() (*string, bool) { +func (o *Peer) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Location, true + return o.Name, true } -// SetLocation sets field value -func (o *Peer) SetLocation(v string) { +// SetName sets field value +func (o *Peer) SetName(v string) { - o.Location = &v + o.Name = &v } -// HasLocation returns a boolean if a field has been set. -func (o *Peer) HasLocation() bool { - if o != nil && o.Location != nil { +// HasName returns a boolean if a field has been set. +func (o *Peer) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -233,21 +233,26 @@ func (o *Peer) HasLocation() bool { func (o Peer) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Name != nil { - toSerialize["name"] = o.Name - } if o.DatacenterId != nil { toSerialize["datacenterId"] = o.DatacenterId } + if o.DatacenterName != nil { toSerialize["datacenterName"] = o.DatacenterName } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Location != nil { toSerialize["location"] = o.Location } + + if o.Name != nil { + toSerialize["name"] = o.Name + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connect.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connect.go index f0aa7eb8f..1a417e85c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connect.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connect.go @@ -16,14 +16,14 @@ import ( // PrivateCrossConnect struct for PrivateCrossConnect type PrivateCrossConnect struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *PrivateCrossConnectProperties `json:"properties"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewPrivateCrossConnect instantiates a new PrivateCrossConnect object @@ -46,190 +46,190 @@ func NewPrivateCrossConnectWithDefaults() *PrivateCrossConnect { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *PrivateCrossConnect) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnect) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnect) GetIdOk() (*string, bool) { +func (o *PrivateCrossConnect) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *PrivateCrossConnect) SetId(v string) { +// SetHref sets field value +func (o *PrivateCrossConnect) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *PrivateCrossConnect) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *PrivateCrossConnect) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *PrivateCrossConnect) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnect) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnect) GetTypeOk() (*Type, bool) { +func (o *PrivateCrossConnect) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *PrivateCrossConnect) SetType(v Type) { +// SetId sets field value +func (o *PrivateCrossConnect) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *PrivateCrossConnect) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *PrivateCrossConnect) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *PrivateCrossConnect) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnect) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnect) GetHrefOk() (*string, bool) { +func (o *PrivateCrossConnect) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *PrivateCrossConnect) SetHref(v string) { +// SetMetadata sets field value +func (o *PrivateCrossConnect) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *PrivateCrossConnect) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *PrivateCrossConnect) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *PrivateCrossConnect) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnect) GetProperties() *PrivateCrossConnectProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnect) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *PrivateCrossConnect) GetPropertiesOk() (*PrivateCrossConnectProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *PrivateCrossConnect) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *PrivateCrossConnect) SetProperties(v PrivateCrossConnectProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *PrivateCrossConnect) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *PrivateCrossConnect) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for PrivateCrossConnectProperties will be returned -func (o *PrivateCrossConnect) GetProperties() *PrivateCrossConnectProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnect) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnect) GetPropertiesOk() (*PrivateCrossConnectProperties, bool) { +func (o *PrivateCrossConnect) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *PrivateCrossConnect) SetProperties(v PrivateCrossConnectProperties) { +// SetType sets field value +func (o *PrivateCrossConnect) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *PrivateCrossConnect) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *PrivateCrossConnect) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *PrivateCrossConnect) HasProperties() bool { func (o PrivateCrossConnect) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connect_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connect_properties.go index a92dc8098..864e5ada1 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connect_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connect_properties.go @@ -16,14 +16,14 @@ import ( // PrivateCrossConnectProperties struct for PrivateCrossConnectProperties type PrivateCrossConnectProperties struct { - // The name of the resource. - Name *string `json:"name,omitempty"` + // Read-Only attribute. Lists data centers that can be joined to this private Cross-Connect. + ConnectableDatacenters *[]ConnectableDatacenter `json:"connectableDatacenters,omitempty"` // Human-readable description. Description *string `json:"description,omitempty"` + // The name of the resource. + Name *string `json:"name,omitempty"` // Read-Only attribute. Lists LAN's joined to this private Cross-Connect. Peers *[]Peer `json:"peers,omitempty"` - // Read-Only attribute. Lists data centers that can be joined to this private Cross-Connect. 
- ConnectableDatacenters *[]ConnectableDatacenter `json:"connectableDatacenters,omitempty"` } // NewPrivateCrossConnectProperties instantiates a new PrivateCrossConnectProperties object @@ -44,38 +44,38 @@ func NewPrivateCrossConnectPropertiesWithDefaults() *PrivateCrossConnectProperti return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *PrivateCrossConnectProperties) GetName() *string { +// GetConnectableDatacenters returns the ConnectableDatacenters field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnectProperties) GetConnectableDatacenters() *[]ConnectableDatacenter { if o == nil { return nil } - return o.Name + return o.ConnectableDatacenters } -// GetNameOk returns a tuple with the Name field value +// GetConnectableDatacentersOk returns a tuple with the ConnectableDatacenters field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnectProperties) GetNameOk() (*string, bool) { +func (o *PrivateCrossConnectProperties) GetConnectableDatacentersOk() (*[]ConnectableDatacenter, bool) { if o == nil { return nil, false } - return o.Name, true + return o.ConnectableDatacenters, true } -// SetName sets field value -func (o *PrivateCrossConnectProperties) SetName(v string) { +// SetConnectableDatacenters sets field value +func (o *PrivateCrossConnectProperties) SetConnectableDatacenters(v []ConnectableDatacenter) { - o.Name = &v + o.ConnectableDatacenters = &v } -// HasName returns a boolean if a field has been set. -func (o *PrivateCrossConnectProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasConnectableDatacenters returns a boolean if a field has been set. +func (o *PrivateCrossConnectProperties) HasConnectableDatacenters() bool { + if o != nil && o.ConnectableDatacenters != nil { return true } @@ -83,7 +83,7 @@ func (o *PrivateCrossConnectProperties) HasName() bool { } // GetDescription returns the Description field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *PrivateCrossConnectProperties) GetDescription() *string { if o == nil { return nil @@ -120,76 +120,76 @@ func (o *PrivateCrossConnectProperties) HasDescription() bool { return false } -// GetPeers returns the Peers field value -// If the value is explicit nil, the zero value for []Peer will be returned -func (o *PrivateCrossConnectProperties) GetPeers() *[]Peer { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnectProperties) GetName() *string { if o == nil { return nil } - return o.Peers + return o.Name } -// GetPeersOk returns a tuple with the Peers field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnectProperties) GetPeersOk() (*[]Peer, bool) { +func (o *PrivateCrossConnectProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Peers, true + return o.Name, true } -// SetPeers sets field value -func (o *PrivateCrossConnectProperties) SetPeers(v []Peer) { +// SetName sets field value +func (o *PrivateCrossConnectProperties) SetName(v string) { - o.Peers = &v + o.Name = &v } -// HasPeers returns a boolean if a field has been set. 
-func (o *PrivateCrossConnectProperties) HasPeers() bool { - if o != nil && o.Peers != nil { +// HasName returns a boolean if a field has been set. +func (o *PrivateCrossConnectProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetConnectableDatacenters returns the ConnectableDatacenters field value -// If the value is explicit nil, the zero value for []ConnectableDatacenter will be returned -func (o *PrivateCrossConnectProperties) GetConnectableDatacenters() *[]ConnectableDatacenter { +// GetPeers returns the Peers field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnectProperties) GetPeers() *[]Peer { if o == nil { return nil } - return o.ConnectableDatacenters + return o.Peers } -// GetConnectableDatacentersOk returns a tuple with the ConnectableDatacenters field value +// GetPeersOk returns a tuple with the Peers field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnectProperties) GetConnectableDatacentersOk() (*[]ConnectableDatacenter, bool) { +func (o *PrivateCrossConnectProperties) GetPeersOk() (*[]Peer, bool) { if o == nil { return nil, false } - return o.ConnectableDatacenters, true + return o.Peers, true } -// SetConnectableDatacenters sets field value -func (o *PrivateCrossConnectProperties) SetConnectableDatacenters(v []ConnectableDatacenter) { +// SetPeers sets field value +func (o *PrivateCrossConnectProperties) SetPeers(v []Peer) { - o.ConnectableDatacenters = &v + o.Peers = &v } -// HasConnectableDatacenters returns a boolean if a field has been set. -func (o *PrivateCrossConnectProperties) HasConnectableDatacenters() bool { - if o != nil && o.ConnectableDatacenters != nil { +// HasPeers returns a boolean if a field has been set. +func (o *PrivateCrossConnectProperties) HasPeers() bool { + if o != nil && o.Peers != nil { return true } @@ -198,18 +198,22 @@ func (o *PrivateCrossConnectProperties) HasConnectableDatacenters() bool { func (o PrivateCrossConnectProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.ConnectableDatacenters != nil { + toSerialize["connectableDatacenters"] = o.ConnectableDatacenters } + if o.Description != nil { toSerialize["description"] = o.Description } + + if o.Name != nil { + toSerialize["name"] = o.Name + } + if o.Peers != nil { toSerialize["peers"] = o.Peers } - if o.ConnectableDatacenters != nil { - toSerialize["connectableDatacenters"] = o.ConnectableDatacenters - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connects.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connects.go index 7112f9a35..3c82ad39f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connects.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_private_cross_connects.go @@ -16,14 +16,14 @@ import ( // PrivateCrossConnects struct for PrivateCrossConnects type PrivateCrossConnects struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. 
Items *[]PrivateCrossConnect `json:"items,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewPrivateCrossConnects instantiates a new PrivateCrossConnects object @@ -44,152 +44,152 @@ func NewPrivateCrossConnectsWithDefaults() *PrivateCrossConnects { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *PrivateCrossConnects) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnects) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnects) GetIdOk() (*string, bool) { +func (o *PrivateCrossConnects) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *PrivateCrossConnects) SetId(v string) { +// SetHref sets field value +func (o *PrivateCrossConnects) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *PrivateCrossConnects) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *PrivateCrossConnects) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *PrivateCrossConnects) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnects) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnects) GetTypeOk() (*Type, bool) { +func (o *PrivateCrossConnects) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *PrivateCrossConnects) SetType(v Type) { +// SetId sets field value +func (o *PrivateCrossConnects) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *PrivateCrossConnects) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *PrivateCrossConnects) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *PrivateCrossConnects) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnects) GetItems() *[]PrivateCrossConnect { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnects) GetHrefOk() (*string, bool) { +func (o *PrivateCrossConnects) GetItemsOk() (*[]PrivateCrossConnect, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *PrivateCrossConnects) SetHref(v string) { +// SetItems sets field value +func (o *PrivateCrossConnects) SetItems(v []PrivateCrossConnect) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *PrivateCrossConnects) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *PrivateCrossConnects) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []PrivateCrossConnect will be returned -func (o *PrivateCrossConnects) GetItems() *[]PrivateCrossConnect { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *PrivateCrossConnects) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *PrivateCrossConnects) GetItemsOk() (*[]PrivateCrossConnect, bool) { +func (o *PrivateCrossConnects) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *PrivateCrossConnects) SetItems(v []PrivateCrossConnect) { +// SetType sets field value +func (o *PrivateCrossConnects) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *PrivateCrossConnects) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. 
+func (o *PrivateCrossConnects) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *PrivateCrossConnects) HasItems() bool { func (o PrivateCrossConnects) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_remote_console_url.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_remote_console_url.go index 4088c7a8c..74f96ce38 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_remote_console_url.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_remote_console_url.go @@ -39,7 +39,7 @@ func NewRemoteConsoleUrlWithDefaults() *RemoteConsoleUrl { } // GetUrl returns the Url field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *RemoteConsoleUrl) GetUrl() *string { if o == nil { return nil @@ -81,6 +81,7 @@ func (o RemoteConsoleUrl) MarshalJSON() ([]byte, error) { if o.Url != nil { toSerialize["url"] = o.Url } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request.go index 5ac4f4851..adc351ff9 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request.go @@ -16,14 +16,14 @@ import ( // Request struct for Request type Request struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *RequestMetadata `json:"metadata,omitempty"` Properties *RequestProperties `json:"properties"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewRequest instantiates a new Request object @@ -46,190 +46,190 @@ func NewRequestWithDefaults() *Request { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Request) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Request) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Request) GetIdOk() (*string, bool) { +func (o *Request) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Request) SetId(v string) { +// SetHref sets field value +func (o *Request) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. 
-func (o *Request) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Request) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Request) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Request) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Request) GetTypeOk() (*Type, bool) { +func (o *Request) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Request) SetType(v Type) { +// SetId sets field value +func (o *Request) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Request) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Request) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Request) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *Request) GetMetadata() *RequestMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Request) GetHrefOk() (*string, bool) { +func (o *Request) GetMetadataOk() (*RequestMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *Request) SetHref(v string) { +// SetMetadata sets field value +func (o *Request) SetMetadata(v RequestMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *Request) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *Request) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for RequestMetadata will be returned -func (o *Request) GetMetadata() *RequestMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *Request) GetProperties() *RequestProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Request) GetMetadataOk() (*RequestMetadata, bool) { +func (o *Request) GetPropertiesOk() (*RequestProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *Request) SetMetadata(v RequestMetadata) { +// SetProperties sets field value +func (o *Request) SetProperties(v RequestProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *Request) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *Request) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for RequestProperties will be returned -func (o *Request) GetProperties() *RequestProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Request) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Request) GetPropertiesOk() (*RequestProperties, bool) { +func (o *Request) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *Request) SetProperties(v RequestProperties) { +// SetType sets field value +func (o *Request) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *Request) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *Request) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *Request) HasProperties() bool { func (o Request) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_metadata.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_metadata.go index 3ea319b31..1b634615e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_metadata.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_metadata.go @@ -17,10 +17,10 @@ import ( // RequestMetadata struct for RequestMetadata type RequestMetadata struct { - // The last time the resource was created. - CreatedDate *IonosTime // The user who created the resource. CreatedBy *string `json:"createdBy,omitempty"` + // The last time the resource was created. 
+ CreatedDate *IonosTime // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. Etag *string `json:"etag,omitempty"` RequestStatus *RequestStatus `json:"requestStatus,omitempty"` @@ -44,83 +44,83 @@ func NewRequestMetadataWithDefaults() *RequestMetadata { return &this } -// GetCreatedDate returns the CreatedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned -func (o *RequestMetadata) GetCreatedDate() *time.Time { +// GetCreatedBy returns the CreatedBy field value +// If the value is explicit nil, nil is returned +func (o *RequestMetadata) GetCreatedBy() *string { if o == nil { return nil } - if o.CreatedDate == nil { - return nil - } - return &o.CreatedDate.Time + return o.CreatedBy } -// GetCreatedDateOk returns a tuple with the CreatedDate field value +// GetCreatedByOk returns a tuple with the CreatedBy field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestMetadata) GetCreatedDateOk() (*time.Time, bool) { +func (o *RequestMetadata) GetCreatedByOk() (*string, bool) { if o == nil { return nil, false } - if o.CreatedDate == nil { - return nil, false - } - return &o.CreatedDate.Time, true - + return o.CreatedBy, true } -// SetCreatedDate sets field value -func (o *RequestMetadata) SetCreatedDate(v time.Time) { +// SetCreatedBy sets field value +func (o *RequestMetadata) SetCreatedBy(v string) { - o.CreatedDate = &IonosTime{v} + o.CreatedBy = &v } -// HasCreatedDate returns a boolean if a field has been set. -func (o *RequestMetadata) HasCreatedDate() bool { - if o != nil && o.CreatedDate != nil { +// HasCreatedBy returns a boolean if a field has been set. +func (o *RequestMetadata) HasCreatedBy() bool { + if o != nil && o.CreatedBy != nil { return true } return false } -// GetCreatedBy returns the CreatedBy field value -// If the value is explicit nil, the zero value for string will be returned -func (o *RequestMetadata) GetCreatedBy() *string { +// GetCreatedDate returns the CreatedDate field value +// If the value is explicit nil, nil is returned +func (o *RequestMetadata) GetCreatedDate() *time.Time { if o == nil { return nil } - return o.CreatedBy + if o.CreatedDate == nil { + return nil + } + return &o.CreatedDate.Time } -// GetCreatedByOk returns a tuple with the CreatedBy field value +// GetCreatedDateOk returns a tuple with the CreatedDate field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestMetadata) GetCreatedByOk() (*string, bool) { +func (o *RequestMetadata) GetCreatedDateOk() (*time.Time, bool) { if o == nil { return nil, false } - return o.CreatedBy, true + if o.CreatedDate == nil { + return nil, false + } + return &o.CreatedDate.Time, true + } -// SetCreatedBy sets field value -func (o *RequestMetadata) SetCreatedBy(v string) { +// SetCreatedDate sets field value +func (o *RequestMetadata) SetCreatedDate(v time.Time) { - o.CreatedBy = &v + o.CreatedDate = &IonosTime{v} } -// HasCreatedBy returns a boolean if a field has been set. -func (o *RequestMetadata) HasCreatedBy() bool { - if o != nil && o.CreatedBy != nil { +// HasCreatedDate returns a boolean if a field has been set. 
+func (o *RequestMetadata) HasCreatedDate() bool { + if o != nil && o.CreatedDate != nil { return true } @@ -128,7 +128,7 @@ func (o *RequestMetadata) HasCreatedBy() bool { } // GetEtag returns the Etag field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *RequestMetadata) GetEtag() *string { if o == nil { return nil @@ -166,7 +166,7 @@ func (o *RequestMetadata) HasEtag() bool { } // GetRequestStatus returns the RequestStatus field value -// If the value is explicit nil, the zero value for RequestStatus will be returned +// If the value is explicit nil, nil is returned func (o *RequestMetadata) GetRequestStatus() *RequestStatus { if o == nil { return nil @@ -205,18 +205,22 @@ func (o *RequestMetadata) HasRequestStatus() bool { func (o RequestMetadata) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.CreatedDate != nil { - toSerialize["createdDate"] = o.CreatedDate - } if o.CreatedBy != nil { toSerialize["createdBy"] = o.CreatedBy } + + if o.CreatedDate != nil { + toSerialize["createdDate"] = o.CreatedDate + } + if o.Etag != nil { toSerialize["etag"] = o.Etag } + if o.RequestStatus != nil { toSerialize["requestStatus"] = o.RequestStatus } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_properties.go index 94d040939..e43b39d98 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_properties.go @@ -16,9 +16,9 @@ import ( // RequestProperties struct for RequestProperties type RequestProperties struct { - Method *string `json:"method,omitempty"` - Headers *map[string]string `json:"headers,omitempty"` Body *string `json:"body,omitempty"` + Headers *map[string]string `json:"headers,omitempty"` + Method *string `json:"method,omitempty"` Url *string `json:"url,omitempty"` } @@ -40,38 +40,38 @@ func NewRequestPropertiesWithDefaults() *RequestProperties { return &this } -// GetMethod returns the Method field value -// If the value is explicit nil, the zero value for string will be returned -func (o *RequestProperties) GetMethod() *string { +// GetBody returns the Body field value +// If the value is explicit nil, nil is returned +func (o *RequestProperties) GetBody() *string { if o == nil { return nil } - return o.Method + return o.Body } -// GetMethodOk returns a tuple with the Method field value +// GetBodyOk returns a tuple with the Body field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestProperties) GetMethodOk() (*string, bool) { +func (o *RequestProperties) GetBodyOk() (*string, bool) { if o == nil { return nil, false } - return o.Method, true + return o.Body, true } -// SetMethod sets field value -func (o *RequestProperties) SetMethod(v string) { +// SetBody sets field value +func (o *RequestProperties) SetBody(v string) { - o.Method = &v + o.Body = &v } -// HasMethod returns a boolean if a field has been set. -func (o *RequestProperties) HasMethod() bool { - if o != nil && o.Method != nil { +// HasBody returns a boolean if a field has been set. 
+func (o *RequestProperties) HasBody() bool { + if o != nil && o.Body != nil { return true } @@ -79,7 +79,7 @@ func (o *RequestProperties) HasMethod() bool { } // GetHeaders returns the Headers field value -// If the value is explicit nil, the zero value for map[string]string will be returned +// If the value is explicit nil, nil is returned func (o *RequestProperties) GetHeaders() *map[string]string { if o == nil { return nil @@ -116,38 +116,38 @@ func (o *RequestProperties) HasHeaders() bool { return false } -// GetBody returns the Body field value -// If the value is explicit nil, the zero value for string will be returned -func (o *RequestProperties) GetBody() *string { +// GetMethod returns the Method field value +// If the value is explicit nil, nil is returned +func (o *RequestProperties) GetMethod() *string { if o == nil { return nil } - return o.Body + return o.Method } -// GetBodyOk returns a tuple with the Body field value +// GetMethodOk returns a tuple with the Method field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestProperties) GetBodyOk() (*string, bool) { +func (o *RequestProperties) GetMethodOk() (*string, bool) { if o == nil { return nil, false } - return o.Body, true + return o.Method, true } -// SetBody sets field value -func (o *RequestProperties) SetBody(v string) { +// SetMethod sets field value +func (o *RequestProperties) SetMethod(v string) { - o.Body = &v + o.Method = &v } -// HasBody returns a boolean if a field has been set. -func (o *RequestProperties) HasBody() bool { - if o != nil && o.Body != nil { +// HasMethod returns a boolean if a field has been set. +func (o *RequestProperties) HasMethod() bool { + if o != nil && o.Method != nil { return true } @@ -155,7 +155,7 @@ func (o *RequestProperties) HasBody() bool { } // GetUrl returns the Url field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *RequestProperties) GetUrl() *string { if o == nil { return nil @@ -194,18 +194,22 @@ func (o *RequestProperties) HasUrl() bool { func (o RequestProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Method != nil { - toSerialize["method"] = o.Method + if o.Body != nil { + toSerialize["body"] = o.Body } + if o.Headers != nil { toSerialize["headers"] = o.Headers } - if o.Body != nil { - toSerialize["body"] = o.Body + + if o.Method != nil { + toSerialize["method"] = o.Method } + if o.Url != nil { toSerialize["url"] = o.Url } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_status.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_status.go index e772df8e6..f4ca3ceef 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_status.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_status.go @@ -16,13 +16,13 @@ import ( // RequestStatus struct for RequestStatus type RequestStatus struct { + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Metadata *RequestStatusMetadata `json:"metadata,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). 
- Href *string `json:"href,omitempty"` - Metadata *RequestStatusMetadata `json:"metadata,omitempty"` } // NewRequestStatus instantiates a new RequestStatus object @@ -43,152 +43,152 @@ func NewRequestStatusWithDefaults() *RequestStatus { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *RequestStatus) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *RequestStatus) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestStatus) GetIdOk() (*string, bool) { +func (o *RequestStatus) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *RequestStatus) SetId(v string) { +// SetHref sets field value +func (o *RequestStatus) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *RequestStatus) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *RequestStatus) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *RequestStatus) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *RequestStatus) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestStatus) GetTypeOk() (*Type, bool) { +func (o *RequestStatus) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *RequestStatus) SetType(v Type) { +// SetId sets field value +func (o *RequestStatus) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *RequestStatus) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *RequestStatus) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *RequestStatus) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *RequestStatus) GetMetadata() *RequestStatusMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestStatus) GetHrefOk() (*string, bool) { +func (o *RequestStatus) GetMetadataOk() (*RequestStatusMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *RequestStatus) SetHref(v string) { +// SetMetadata sets field value +func (o *RequestStatus) SetMetadata(v RequestStatusMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *RequestStatus) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *RequestStatus) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for RequestStatusMetadata will be returned -func (o *RequestStatus) GetMetadata() *RequestStatusMetadata { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *RequestStatus) GetType() *Type { if o == nil { return nil } - return o.Metadata + return o.Type } -// GetMetadataOk returns a tuple with the Metadata field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestStatus) GetMetadataOk() (*RequestStatusMetadata, bool) { +func (o *RequestStatus) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Type, true } -// SetMetadata sets field value -func (o *RequestStatus) SetMetadata(v RequestStatusMetadata) { +// SetType sets field value +func (o *RequestStatus) SetType(v Type) { - o.Metadata = &v + o.Type = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *RequestStatus) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasType returns a boolean if a field has been set. +func (o *RequestStatus) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -197,18 +197,22 @@ func (o *RequestStatus) HasMetadata() bool { func (o RequestStatus) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_status_metadata.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_status_metadata.go index ef97dbedb..29b9967c8 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_status_metadata.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_status_metadata.go @@ -16,10 +16,10 @@ import ( // RequestStatusMetadata struct for RequestStatusMetadata type RequestStatusMetadata struct { - Status *string `json:"status,omitempty"` - Message *string `json:"message,omitempty"` // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. 
Etag *string `json:"etag,omitempty"` + Message *string `json:"message,omitempty"` + Status *string `json:"status,omitempty"` Targets *[]RequestTarget `json:"targets,omitempty"` } @@ -41,38 +41,38 @@ func NewRequestStatusMetadataWithDefaults() *RequestStatusMetadata { return &this } -// GetStatus returns the Status field value -// If the value is explicit nil, the zero value for string will be returned -func (o *RequestStatusMetadata) GetStatus() *string { +// GetEtag returns the Etag field value +// If the value is explicit nil, nil is returned +func (o *RequestStatusMetadata) GetEtag() *string { if o == nil { return nil } - return o.Status + return o.Etag } -// GetStatusOk returns a tuple with the Status field value +// GetEtagOk returns a tuple with the Etag field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestStatusMetadata) GetStatusOk() (*string, bool) { +func (o *RequestStatusMetadata) GetEtagOk() (*string, bool) { if o == nil { return nil, false } - return o.Status, true + return o.Etag, true } -// SetStatus sets field value -func (o *RequestStatusMetadata) SetStatus(v string) { +// SetEtag sets field value +func (o *RequestStatusMetadata) SetEtag(v string) { - o.Status = &v + o.Etag = &v } -// HasStatus returns a boolean if a field has been set. -func (o *RequestStatusMetadata) HasStatus() bool { - if o != nil && o.Status != nil { +// HasEtag returns a boolean if a field has been set. +func (o *RequestStatusMetadata) HasEtag() bool { + if o != nil && o.Etag != nil { return true } @@ -80,7 +80,7 @@ func (o *RequestStatusMetadata) HasStatus() bool { } // GetMessage returns the Message field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *RequestStatusMetadata) GetMessage() *string { if o == nil { return nil @@ -117,38 +117,38 @@ func (o *RequestStatusMetadata) HasMessage() bool { return false } -// GetEtag returns the Etag field value -// If the value is explicit nil, the zero value for string will be returned -func (o *RequestStatusMetadata) GetEtag() *string { +// GetStatus returns the Status field value +// If the value is explicit nil, nil is returned +func (o *RequestStatusMetadata) GetStatus() *string { if o == nil { return nil } - return o.Etag + return o.Status } -// GetEtagOk returns a tuple with the Etag field value +// GetStatusOk returns a tuple with the Status field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestStatusMetadata) GetEtagOk() (*string, bool) { +func (o *RequestStatusMetadata) GetStatusOk() (*string, bool) { if o == nil { return nil, false } - return o.Etag, true + return o.Status, true } -// SetEtag sets field value -func (o *RequestStatusMetadata) SetEtag(v string) { +// SetStatus sets field value +func (o *RequestStatusMetadata) SetStatus(v string) { - o.Etag = &v + o.Status = &v } -// HasEtag returns a boolean if a field has been set. -func (o *RequestStatusMetadata) HasEtag() bool { - if o != nil && o.Etag != nil { +// HasStatus returns a boolean if a field has been set. 
+func (o *RequestStatusMetadata) HasStatus() bool { + if o != nil && o.Status != nil { return true } @@ -156,7 +156,7 @@ func (o *RequestStatusMetadata) HasEtag() bool { } // GetTargets returns the Targets field value -// If the value is explicit nil, the zero value for []RequestTarget will be returned +// If the value is explicit nil, nil is returned func (o *RequestStatusMetadata) GetTargets() *[]RequestTarget { if o == nil { return nil @@ -195,18 +195,22 @@ func (o *RequestStatusMetadata) HasTargets() bool { func (o RequestStatusMetadata) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Status != nil { - toSerialize["status"] = o.Status + if o.Etag != nil { + toSerialize["etag"] = o.Etag } + if o.Message != nil { toSerialize["message"] = o.Message } - if o.Etag != nil { - toSerialize["etag"] = o.Etag + + if o.Status != nil { + toSerialize["status"] = o.Status } + if o.Targets != nil { toSerialize["targets"] = o.Targets } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_target.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_target.go index 590a5dfae..dc56f80ab 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_target.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_request_target.go @@ -16,8 +16,8 @@ import ( // RequestTarget struct for RequestTarget type RequestTarget struct { - Target *ResourceReference `json:"target,omitempty"` Status *string `json:"status,omitempty"` + Target *ResourceReference `json:"target,omitempty"` } // NewRequestTarget instantiates a new RequestTarget object @@ -38,76 +38,76 @@ func NewRequestTargetWithDefaults() *RequestTarget { return &this } -// GetTarget returns the Target field value -// If the value is explicit nil, the zero value for ResourceReference will be returned -func (o *RequestTarget) GetTarget() *ResourceReference { +// GetStatus returns the Status field value +// If the value is explicit nil, nil is returned +func (o *RequestTarget) GetStatus() *string { if o == nil { return nil } - return o.Target + return o.Status } -// GetTargetOk returns a tuple with the Target field value +// GetStatusOk returns a tuple with the Status field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestTarget) GetTargetOk() (*ResourceReference, bool) { +func (o *RequestTarget) GetStatusOk() (*string, bool) { if o == nil { return nil, false } - return o.Target, true + return o.Status, true } -// SetTarget sets field value -func (o *RequestTarget) SetTarget(v ResourceReference) { +// SetStatus sets field value +func (o *RequestTarget) SetStatus(v string) { - o.Target = &v + o.Status = &v } -// HasTarget returns a boolean if a field has been set. -func (o *RequestTarget) HasTarget() bool { - if o != nil && o.Target != nil { +// HasStatus returns a boolean if a field has been set. 
+func (o *RequestTarget) HasStatus() bool { + if o != nil && o.Status != nil { return true } return false } -// GetStatus returns the Status field value -// If the value is explicit nil, the zero value for string will be returned -func (o *RequestTarget) GetStatus() *string { +// GetTarget returns the Target field value +// If the value is explicit nil, nil is returned +func (o *RequestTarget) GetTarget() *ResourceReference { if o == nil { return nil } - return o.Status + return o.Target } -// GetStatusOk returns a tuple with the Status field value +// GetTargetOk returns a tuple with the Target field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *RequestTarget) GetStatusOk() (*string, bool) { +func (o *RequestTarget) GetTargetOk() (*ResourceReference, bool) { if o == nil { return nil, false } - return o.Status, true + return o.Target, true } -// SetStatus sets field value -func (o *RequestTarget) SetStatus(v string) { +// SetTarget sets field value +func (o *RequestTarget) SetTarget(v ResourceReference) { - o.Status = &v + o.Target = &v } -// HasStatus returns a boolean if a field has been set. -func (o *RequestTarget) HasStatus() bool { - if o != nil && o.Status != nil { +// HasTarget returns a boolean if a field has been set. +func (o *RequestTarget) HasTarget() bool { + if o != nil && o.Target != nil { return true } @@ -116,12 +116,14 @@ func (o *RequestTarget) HasStatus() bool { func (o RequestTarget) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Target != nil { - toSerialize["target"] = o.Target - } if o.Status != nil { toSerialize["status"] = o.Status } + + if o.Target != nil { + toSerialize["target"] = o.Target + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_requests.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_requests.go index 3d67bdc81..0bf44ade8 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_requests.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_requests.go @@ -16,31 +16,31 @@ import ( // Requests struct for Requests type Requests struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Request `json:"items,omitempty"` + // The limit, specified in the request (if not specified, the endpoint's default pagination limit is used). + Limit *float32 `json:"limit"` // The offset, specified in the request (if not is specified, 0 is used by default). Offset *float32 `json:"offset"` - // The limit, specified in the request (if not specified, the endpoint's default pagination limit is used). - Limit *float32 `json:"limit"` - Links *PaginationLinks `json:"_links"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewRequests instantiates a new Requests object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewRequests(offset float32, limit float32, links PaginationLinks) *Requests { +func NewRequests(links PaginationLinks, limit float32, offset float32) *Requests { this := Requests{} - this.Offset = &offset - this.Limit = &limit this.Links = &links + this.Limit = &limit + this.Offset = &offset return &this } @@ -53,114 +53,114 @@ func NewRequestsWithDefaults() *Requests { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Requests) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *Requests) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Requests) GetIdOk() (*string, bool) { +func (o *Requests) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *Requests) SetId(v string) { +// SetLinks sets field value +func (o *Requests) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *Requests) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *Requests) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Requests) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Requests) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Requests) GetTypeOk() (*Type, bool) { +func (o *Requests) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Requests) SetType(v Type) { +// SetHref sets field value +func (o *Requests) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Requests) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *Requests) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Requests) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Requests) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Requests) GetHrefOk() (*string, bool) { +func (o *Requests) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Requests) SetHref(v string) { +// SetId sets field value +func (o *Requests) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Requests) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Requests) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -168,7 +168,7 @@ func (o *Requests) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Request will be returned +// If the value is explicit nil, nil is returned func (o *Requests) GetItems() *[]Request { if o == nil { return nil @@ -205,114 +205,114 @@ func (o *Requests) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Requests) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *Requests) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Requests) GetOffsetOk() (*float32, bool) { +func (o *Requests) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *Requests) SetOffset(v float32) { +// SetLimit sets field value +func (o *Requests) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *Requests) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *Requests) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Requests) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *Requests) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Requests) GetLimitOk() (*float32, bool) { +func (o *Requests) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *Requests) SetLimit(v float32) { +// SetOffset sets field value +func (o *Requests) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *Requests) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *Requests) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *Requests) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Requests) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Requests) GetLinksOk() (*PaginationLinks, bool) { +func (o *Requests) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *Requests) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *Requests) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *Requests) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *Requests) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -321,27 +321,34 @@ func (o *Requests) HasLinks() bool { func (o Requests) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource.go index 1d370ce22..82b8f739d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource.go @@ -16,15 +16,15 @@ import ( // Resource datacenter resource representation type Resource struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of the resource. - Type *Type `json:"type,omitempty"` + Entities *ResourceEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. 
+ Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *ResourceProperties `json:"properties,omitempty"` - Entities *ResourceEntities `json:"entities,omitempty"` + // The type of the resource. + Type *Type `json:"type,omitempty"` } // NewResource instantiates a new Resource object @@ -45,114 +45,114 @@ func NewResourceWithDefaults() *Resource { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Resource) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *Resource) GetEntities() *ResourceEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Resource) GetIdOk() (*string, bool) { +func (o *Resource) GetEntitiesOk() (*ResourceEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *Resource) SetId(v string) { +// SetEntities sets field value +func (o *Resource) SetEntities(v ResourceEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *Resource) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *Resource) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Resource) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Resource) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Resource) GetTypeOk() (*Type, bool) { +func (o *Resource) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Resource) SetType(v Type) { +// SetHref sets field value +func (o *Resource) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Resource) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Resource) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Resource) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Resource) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Resource) GetHrefOk() (*string, bool) { +func (o *Resource) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Resource) SetHref(v string) { +// SetId sets field value +func (o *Resource) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Resource) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Resource) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -160,7 +160,7 @@ func (o *Resource) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *Resource) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -198,7 +198,7 @@ func (o *Resource) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for ResourceProperties will be returned +// If the value is explicit nil, nil is returned func (o *Resource) GetProperties() *ResourceProperties { if o == nil { return nil @@ -235,38 +235,38 @@ func (o *Resource) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for ResourceEntities will be returned -func (o *Resource) GetEntities() *ResourceEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Resource) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Resource) GetEntitiesOk() (*ResourceEntities, bool) { +func (o *Resource) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *Resource) SetEntities(v ResourceEntities) { +// SetType sets field value +func (o *Resource) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *Resource) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Resource) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -275,24 +275,30 @@ func (o *Resource) HasEntities() bool { func (o Resource) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_entities.go index cc9f26acb..735ffe73b 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_entities.go @@ -38,7 +38,7 @@ func NewResourceEntitiesWithDefaults() *ResourceEntities { } // GetGroups returns the Groups field value -// If the value is explicit nil, the zero value for ResourceGroups will be returned +// If the value is explicit nil, nil is returned func (o *ResourceEntities) GetGroups() *ResourceGroups { if o == nil { return nil @@ -80,6 +80,7 @@ func (o ResourceEntities) MarshalJSON() ([]byte, error) { if o.Groups != nil { toSerialize["groups"] = o.Groups } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_groups.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_groups.go index ae8939261..8d0a2a1a6 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_groups.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_groups.go @@ -16,14 +16,14 @@ import ( // ResourceGroups Resources assigned to this group. type ResourceGroups struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of the resource. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Resource `json:"items,omitempty"` + // The type of the resource. + Type *Type `json:"type,omitempty"` } // NewResourceGroups instantiates a new ResourceGroups object @@ -44,152 +44,152 @@ func NewResourceGroupsWithDefaults() *ResourceGroups { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ResourceGroups) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *ResourceGroups) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceGroups) GetIdOk() (*string, bool) { +func (o *ResourceGroups) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *ResourceGroups) SetId(v string) { +// SetHref sets field value +func (o *ResourceGroups) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *ResourceGroups) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *ResourceGroups) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *ResourceGroups) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *ResourceGroups) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceGroups) GetTypeOk() (*Type, bool) { +func (o *ResourceGroups) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *ResourceGroups) SetType(v Type) { +// SetId sets field value +func (o *ResourceGroups) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *ResourceGroups) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *ResourceGroups) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ResourceGroups) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *ResourceGroups) GetItems() *[]Resource { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceGroups) GetHrefOk() (*string, bool) { +func (o *ResourceGroups) GetItemsOk() (*[]Resource, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *ResourceGroups) SetHref(v string) { +// SetItems sets field value +func (o *ResourceGroups) SetItems(v []Resource) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *ResourceGroups) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. 
+func (o *ResourceGroups) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Resource will be returned -func (o *ResourceGroups) GetItems() *[]Resource { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ResourceGroups) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceGroups) GetItemsOk() (*[]Resource, bool) { +func (o *ResourceGroups) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *ResourceGroups) SetItems(v []Resource) { +// SetType sets field value +func (o *ResourceGroups) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *ResourceGroups) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *ResourceGroups) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *ResourceGroups) HasItems() bool { func (o ResourceGroups) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_limits.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_limits.go index 8dbbe386e..ca9ee8400 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_limits.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_limits.go @@ -16,81 +16,81 @@ import ( // ResourceLimits struct for ResourceLimits type ResourceLimits struct { - // The maximum number of CPU cores per server. - CoresPerServer *int32 `json:"coresPerServer"` // The maximum number of CPU cores per contract. CoresPerContract *int32 `json:"coresPerContract"` + // The maximum number of CPU cores per server. + CoresPerServer *int32 `json:"coresPerServer"` // The number of CPU cores provisioned. CoresProvisioned *int32 `json:"coresProvisioned"` - // The maximum amount of RAM (in MB) that can be provisioned for a particular server under this contract. - RamPerServer *int32 `json:"ramPerServer"` - // The maximum amount of RAM (in MB) that can be provisioned under this contract. - RamPerContract *int32 `json:"ramPerContract"` - // The amount of RAM (in MB) provisioned under this contract. - RamProvisioned *int32 `json:"ramProvisioned"` - // The maximum size (in MB) of an idividual hard disk volume. - HddLimitPerVolume *int64 `json:"hddLimitPerVolume"` + // The amount of DAS disk space (in MB) in a Cube server that is currently provisioned. + DasVolumeProvisioned *int64 `json:"dasVolumeProvisioned"` // The maximum amount of disk space (in MB) that can be provided under this contract. 
HddLimitPerContract *int64 `json:"hddLimitPerContract"` + // The maximum size (in MB) of an idividual hard disk volume. + HddLimitPerVolume *int64 `json:"hddLimitPerVolume"` // The amount of hard disk space (in MB) that is currently provisioned. HddVolumeProvisioned *int64 `json:"hddVolumeProvisioned"` - // The maximum size (in MB) of an individual solid state disk volume. - SsdLimitPerVolume *int64 `json:"ssdLimitPerVolume"` - // The maximum amount of solid state disk space (in MB) that can be provisioned under this contract. - SsdLimitPerContract *int64 `json:"ssdLimitPerContract"` - // The amount of solid state disk space (in MB) that is currently provisioned. - SsdVolumeProvisioned *int64 `json:"ssdVolumeProvisioned"` - // The amount of DAS disk space (in MB) in a Cube server that is currently provisioned. - DasVolumeProvisioned *int64 `json:"dasVolumeProvisioned"` - // The maximum number of static public IP addresses that can be reserved by this customer across contracts. - ReservableIps *int32 `json:"reservableIps"` - // The maximum number of static public IP addresses that can be reserved for this contract. - ReservedIpsOnContract *int32 `json:"reservedIpsOnContract"` - // The number of static public IP addresses in use. - ReservedIpsInUse *int32 `json:"reservedIpsInUse"` // The maximum number of Kubernetes clusters that can be created under this contract. K8sClusterLimitTotal *int32 `json:"k8sClusterLimitTotal"` // The amount of Kubernetes clusters that is currently provisioned. K8sClustersProvisioned *int32 `json:"k8sClustersProvisioned"` - // The NLB total limit. - NlbLimitTotal *int32 `json:"nlbLimitTotal"` - // The NLBs provisioned. - NlbProvisioned *int32 `json:"nlbProvisioned"` // The NAT Gateway total limit. NatGatewayLimitTotal *int32 `json:"natGatewayLimitTotal"` // The NAT Gateways provisioned. NatGatewayProvisioned *int32 `json:"natGatewayProvisioned"` + // The NLB total limit. + NlbLimitTotal *int32 `json:"nlbLimitTotal"` + // The NLBs provisioned. + NlbProvisioned *int32 `json:"nlbProvisioned"` + // The maximum amount of RAM (in MB) that can be provisioned under this contract. + RamPerContract *int32 `json:"ramPerContract"` + // The maximum amount of RAM (in MB) that can be provisioned for a particular server under this contract. + RamPerServer *int32 `json:"ramPerServer"` + // The amount of RAM (in MB) provisioned under this contract. + RamProvisioned *int32 `json:"ramProvisioned"` + // The maximum number of static public IP addresses that can be reserved by this customer across contracts. + ReservableIps *int32 `json:"reservableIps"` + // The number of static public IP addresses in use. + ReservedIpsInUse *int32 `json:"reservedIpsInUse"` + // The maximum number of static public IP addresses that can be reserved for this contract. + ReservedIpsOnContract *int32 `json:"reservedIpsOnContract"` + // The maximum amount of solid state disk space (in MB) that can be provisioned under this contract. + SsdLimitPerContract *int64 `json:"ssdLimitPerContract"` + // The maximum size (in MB) of an individual solid state disk volume. + SsdLimitPerVolume *int64 `json:"ssdLimitPerVolume"` + // The amount of solid state disk space (in MB) that is currently provisioned. 
+ SsdVolumeProvisioned *int64 `json:"ssdVolumeProvisioned"` } // NewResourceLimits instantiates a new ResourceLimits object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewResourceLimits(coresPerServer int32, coresPerContract int32, coresProvisioned int32, ramPerServer int32, ramPerContract int32, ramProvisioned int32, hddLimitPerVolume int64, hddLimitPerContract int64, hddVolumeProvisioned int64, ssdLimitPerVolume int64, ssdLimitPerContract int64, ssdVolumeProvisioned int64, dasVolumeProvisioned int64, reservableIps int32, reservedIpsOnContract int32, reservedIpsInUse int32, k8sClusterLimitTotal int32, k8sClustersProvisioned int32, nlbLimitTotal int32, nlbProvisioned int32, natGatewayLimitTotal int32, natGatewayProvisioned int32) *ResourceLimits { +func NewResourceLimits(coresPerContract int32, coresPerServer int32, coresProvisioned int32, dasVolumeProvisioned int64, hddLimitPerContract int64, hddLimitPerVolume int64, hddVolumeProvisioned int64, k8sClusterLimitTotal int32, k8sClustersProvisioned int32, natGatewayLimitTotal int32, natGatewayProvisioned int32, nlbLimitTotal int32, nlbProvisioned int32, ramPerContract int32, ramPerServer int32, ramProvisioned int32, reservableIps int32, reservedIpsInUse int32, reservedIpsOnContract int32, ssdLimitPerContract int64, ssdLimitPerVolume int64, ssdVolumeProvisioned int64) *ResourceLimits { this := ResourceLimits{} - this.CoresPerServer = &coresPerServer this.CoresPerContract = &coresPerContract + this.CoresPerServer = &coresPerServer this.CoresProvisioned = &coresProvisioned - this.RamPerServer = &ramPerServer - this.RamPerContract = &ramPerContract - this.RamProvisioned = &ramProvisioned - this.HddLimitPerVolume = &hddLimitPerVolume + this.DasVolumeProvisioned = &dasVolumeProvisioned this.HddLimitPerContract = &hddLimitPerContract + this.HddLimitPerVolume = &hddLimitPerVolume this.HddVolumeProvisioned = &hddVolumeProvisioned - this.SsdLimitPerVolume = &ssdLimitPerVolume - this.SsdLimitPerContract = &ssdLimitPerContract - this.SsdVolumeProvisioned = &ssdVolumeProvisioned - this.DasVolumeProvisioned = &dasVolumeProvisioned - this.ReservableIps = &reservableIps - this.ReservedIpsOnContract = &reservedIpsOnContract - this.ReservedIpsInUse = &reservedIpsInUse this.K8sClusterLimitTotal = &k8sClusterLimitTotal this.K8sClustersProvisioned = &k8sClustersProvisioned - this.NlbLimitTotal = &nlbLimitTotal - this.NlbProvisioned = &nlbProvisioned this.NatGatewayLimitTotal = &natGatewayLimitTotal this.NatGatewayProvisioned = &natGatewayProvisioned + this.NlbLimitTotal = &nlbLimitTotal + this.NlbProvisioned = &nlbProvisioned + this.RamPerContract = &ramPerContract + this.RamPerServer = &ramPerServer + this.RamProvisioned = &ramProvisioned + this.ReservableIps = &reservableIps + this.ReservedIpsInUse = &reservedIpsInUse + this.ReservedIpsOnContract = &reservedIpsOnContract + this.SsdLimitPerContract = &ssdLimitPerContract + this.SsdLimitPerVolume = &ssdLimitPerVolume + this.SsdVolumeProvisioned = &ssdVolumeProvisioned return &this } @@ -103,76 +103,76 @@ func NewResourceLimitsWithDefaults() *ResourceLimits { return &this } -// GetCoresPerServer returns the CoresPerServer field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetCoresPerServer() *int32 { +// GetCoresPerContract returns the CoresPerContract field value +// 
If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetCoresPerContract() *int32 { if o == nil { return nil } - return o.CoresPerServer + return o.CoresPerContract } -// GetCoresPerServerOk returns a tuple with the CoresPerServer field value +// GetCoresPerContractOk returns a tuple with the CoresPerContract field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetCoresPerServerOk() (*int32, bool) { +func (o *ResourceLimits) GetCoresPerContractOk() (*int32, bool) { if o == nil { return nil, false } - return o.CoresPerServer, true + return o.CoresPerContract, true } -// SetCoresPerServer sets field value -func (o *ResourceLimits) SetCoresPerServer(v int32) { +// SetCoresPerContract sets field value +func (o *ResourceLimits) SetCoresPerContract(v int32) { - o.CoresPerServer = &v + o.CoresPerContract = &v } -// HasCoresPerServer returns a boolean if a field has been set. -func (o *ResourceLimits) HasCoresPerServer() bool { - if o != nil && o.CoresPerServer != nil { +// HasCoresPerContract returns a boolean if a field has been set. +func (o *ResourceLimits) HasCoresPerContract() bool { + if o != nil && o.CoresPerContract != nil { return true } return false } -// GetCoresPerContract returns the CoresPerContract field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetCoresPerContract() *int32 { +// GetCoresPerServer returns the CoresPerServer field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetCoresPerServer() *int32 { if o == nil { return nil } - return o.CoresPerContract + return o.CoresPerServer } -// GetCoresPerContractOk returns a tuple with the CoresPerContract field value +// GetCoresPerServerOk returns a tuple with the CoresPerServer field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetCoresPerContractOk() (*int32, bool) { +func (o *ResourceLimits) GetCoresPerServerOk() (*int32, bool) { if o == nil { return nil, false } - return o.CoresPerContract, true + return o.CoresPerServer, true } -// SetCoresPerContract sets field value -func (o *ResourceLimits) SetCoresPerContract(v int32) { +// SetCoresPerServer sets field value +func (o *ResourceLimits) SetCoresPerServer(v int32) { - o.CoresPerContract = &v + o.CoresPerServer = &v } -// HasCoresPerContract returns a boolean if a field has been set. -func (o *ResourceLimits) HasCoresPerContract() bool { - if o != nil && o.CoresPerContract != nil { +// HasCoresPerServer returns a boolean if a field has been set. 
+func (o *ResourceLimits) HasCoresPerServer() bool { + if o != nil && o.CoresPerServer != nil { return true } @@ -180,7 +180,7 @@ func (o *ResourceLimits) HasCoresPerContract() bool { } // GetCoresProvisioned returns the CoresProvisioned field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *ResourceLimits) GetCoresProvisioned() *int32 { if o == nil { return nil @@ -217,722 +217,722 @@ func (o *ResourceLimits) HasCoresProvisioned() bool { return false } -// GetRamPerServer returns the RamPerServer field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetRamPerServer() *int32 { +// GetDasVolumeProvisioned returns the DasVolumeProvisioned field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetDasVolumeProvisioned() *int64 { if o == nil { return nil } - return o.RamPerServer + return o.DasVolumeProvisioned } -// GetRamPerServerOk returns a tuple with the RamPerServer field value +// GetDasVolumeProvisionedOk returns a tuple with the DasVolumeProvisioned field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetRamPerServerOk() (*int32, bool) { +func (o *ResourceLimits) GetDasVolumeProvisionedOk() (*int64, bool) { if o == nil { return nil, false } - return o.RamPerServer, true + return o.DasVolumeProvisioned, true } -// SetRamPerServer sets field value -func (o *ResourceLimits) SetRamPerServer(v int32) { +// SetDasVolumeProvisioned sets field value +func (o *ResourceLimits) SetDasVolumeProvisioned(v int64) { - o.RamPerServer = &v + o.DasVolumeProvisioned = &v } -// HasRamPerServer returns a boolean if a field has been set. -func (o *ResourceLimits) HasRamPerServer() bool { - if o != nil && o.RamPerServer != nil { +// HasDasVolumeProvisioned returns a boolean if a field has been set. +func (o *ResourceLimits) HasDasVolumeProvisioned() bool { + if o != nil && o.DasVolumeProvisioned != nil { return true } return false } -// GetRamPerContract returns the RamPerContract field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetRamPerContract() *int32 { +// GetHddLimitPerContract returns the HddLimitPerContract field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetHddLimitPerContract() *int64 { if o == nil { return nil } - return o.RamPerContract + return o.HddLimitPerContract } -// GetRamPerContractOk returns a tuple with the RamPerContract field value +// GetHddLimitPerContractOk returns a tuple with the HddLimitPerContract field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetRamPerContractOk() (*int32, bool) { +func (o *ResourceLimits) GetHddLimitPerContractOk() (*int64, bool) { if o == nil { return nil, false } - return o.RamPerContract, true + return o.HddLimitPerContract, true } -// SetRamPerContract sets field value -func (o *ResourceLimits) SetRamPerContract(v int32) { +// SetHddLimitPerContract sets field value +func (o *ResourceLimits) SetHddLimitPerContract(v int64) { - o.RamPerContract = &v + o.HddLimitPerContract = &v } -// HasRamPerContract returns a boolean if a field has been set. 
-func (o *ResourceLimits) HasRamPerContract() bool { - if o != nil && o.RamPerContract != nil { +// HasHddLimitPerContract returns a boolean if a field has been set. +func (o *ResourceLimits) HasHddLimitPerContract() bool { + if o != nil && o.HddLimitPerContract != nil { return true } return false } -// GetRamProvisioned returns the RamProvisioned field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetRamProvisioned() *int32 { +// GetHddLimitPerVolume returns the HddLimitPerVolume field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetHddLimitPerVolume() *int64 { if o == nil { return nil } - return o.RamProvisioned + return o.HddLimitPerVolume } -// GetRamProvisionedOk returns a tuple with the RamProvisioned field value +// GetHddLimitPerVolumeOk returns a tuple with the HddLimitPerVolume field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetRamProvisionedOk() (*int32, bool) { +func (o *ResourceLimits) GetHddLimitPerVolumeOk() (*int64, bool) { if o == nil { return nil, false } - return o.RamProvisioned, true + return o.HddLimitPerVolume, true } -// SetRamProvisioned sets field value -func (o *ResourceLimits) SetRamProvisioned(v int32) { +// SetHddLimitPerVolume sets field value +func (o *ResourceLimits) SetHddLimitPerVolume(v int64) { - o.RamProvisioned = &v + o.HddLimitPerVolume = &v } -// HasRamProvisioned returns a boolean if a field has been set. -func (o *ResourceLimits) HasRamProvisioned() bool { - if o != nil && o.RamProvisioned != nil { +// HasHddLimitPerVolume returns a boolean if a field has been set. +func (o *ResourceLimits) HasHddLimitPerVolume() bool { + if o != nil && o.HddLimitPerVolume != nil { return true } return false } -// GetHddLimitPerVolume returns the HddLimitPerVolume field value -// If the value is explicit nil, the zero value for int64 will be returned -func (o *ResourceLimits) GetHddLimitPerVolume() *int64 { +// GetHddVolumeProvisioned returns the HddVolumeProvisioned field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetHddVolumeProvisioned() *int64 { if o == nil { return nil } - return o.HddLimitPerVolume + return o.HddVolumeProvisioned } -// GetHddLimitPerVolumeOk returns a tuple with the HddLimitPerVolume field value +// GetHddVolumeProvisionedOk returns a tuple with the HddVolumeProvisioned field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetHddLimitPerVolumeOk() (*int64, bool) { +func (o *ResourceLimits) GetHddVolumeProvisionedOk() (*int64, bool) { if o == nil { return nil, false } - return o.HddLimitPerVolume, true + return o.HddVolumeProvisioned, true } -// SetHddLimitPerVolume sets field value -func (o *ResourceLimits) SetHddLimitPerVolume(v int64) { +// SetHddVolumeProvisioned sets field value +func (o *ResourceLimits) SetHddVolumeProvisioned(v int64) { - o.HddLimitPerVolume = &v + o.HddVolumeProvisioned = &v } -// HasHddLimitPerVolume returns a boolean if a field has been set. -func (o *ResourceLimits) HasHddLimitPerVolume() bool { - if o != nil && o.HddLimitPerVolume != nil { +// HasHddVolumeProvisioned returns a boolean if a field has been set. 
+func (o *ResourceLimits) HasHddVolumeProvisioned() bool { + if o != nil && o.HddVolumeProvisioned != nil { return true } return false } -// GetHddLimitPerContract returns the HddLimitPerContract field value -// If the value is explicit nil, the zero value for int64 will be returned -func (o *ResourceLimits) GetHddLimitPerContract() *int64 { +// GetK8sClusterLimitTotal returns the K8sClusterLimitTotal field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetK8sClusterLimitTotal() *int32 { if o == nil { return nil } - return o.HddLimitPerContract + return o.K8sClusterLimitTotal } -// GetHddLimitPerContractOk returns a tuple with the HddLimitPerContract field value +// GetK8sClusterLimitTotalOk returns a tuple with the K8sClusterLimitTotal field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetHddLimitPerContractOk() (*int64, bool) { +func (o *ResourceLimits) GetK8sClusterLimitTotalOk() (*int32, bool) { if o == nil { return nil, false } - return o.HddLimitPerContract, true + return o.K8sClusterLimitTotal, true } -// SetHddLimitPerContract sets field value -func (o *ResourceLimits) SetHddLimitPerContract(v int64) { +// SetK8sClusterLimitTotal sets field value +func (o *ResourceLimits) SetK8sClusterLimitTotal(v int32) { - o.HddLimitPerContract = &v + o.K8sClusterLimitTotal = &v } -// HasHddLimitPerContract returns a boolean if a field has been set. -func (o *ResourceLimits) HasHddLimitPerContract() bool { - if o != nil && o.HddLimitPerContract != nil { +// HasK8sClusterLimitTotal returns a boolean if a field has been set. +func (o *ResourceLimits) HasK8sClusterLimitTotal() bool { + if o != nil && o.K8sClusterLimitTotal != nil { return true } return false } -// GetHddVolumeProvisioned returns the HddVolumeProvisioned field value -// If the value is explicit nil, the zero value for int64 will be returned -func (o *ResourceLimits) GetHddVolumeProvisioned() *int64 { +// GetK8sClustersProvisioned returns the K8sClustersProvisioned field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetK8sClustersProvisioned() *int32 { if o == nil { return nil } - return o.HddVolumeProvisioned + return o.K8sClustersProvisioned } -// GetHddVolumeProvisionedOk returns a tuple with the HddVolumeProvisioned field value +// GetK8sClustersProvisionedOk returns a tuple with the K8sClustersProvisioned field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetHddVolumeProvisionedOk() (*int64, bool) { +func (o *ResourceLimits) GetK8sClustersProvisionedOk() (*int32, bool) { if o == nil { return nil, false } - return o.HddVolumeProvisioned, true + return o.K8sClustersProvisioned, true } -// SetHddVolumeProvisioned sets field value -func (o *ResourceLimits) SetHddVolumeProvisioned(v int64) { +// SetK8sClustersProvisioned sets field value +func (o *ResourceLimits) SetK8sClustersProvisioned(v int32) { - o.HddVolumeProvisioned = &v + o.K8sClustersProvisioned = &v } -// HasHddVolumeProvisioned returns a boolean if a field has been set. -func (o *ResourceLimits) HasHddVolumeProvisioned() bool { - if o != nil && o.HddVolumeProvisioned != nil { +// HasK8sClustersProvisioned returns a boolean if a field has been set. 
+func (o *ResourceLimits) HasK8sClustersProvisioned() bool { + if o != nil && o.K8sClustersProvisioned != nil { return true } return false } -// GetSsdLimitPerVolume returns the SsdLimitPerVolume field value -// If the value is explicit nil, the zero value for int64 will be returned -func (o *ResourceLimits) GetSsdLimitPerVolume() *int64 { +// GetNatGatewayLimitTotal returns the NatGatewayLimitTotal field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetNatGatewayLimitTotal() *int32 { if o == nil { return nil } - return o.SsdLimitPerVolume + return o.NatGatewayLimitTotal } -// GetSsdLimitPerVolumeOk returns a tuple with the SsdLimitPerVolume field value +// GetNatGatewayLimitTotalOk returns a tuple with the NatGatewayLimitTotal field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetSsdLimitPerVolumeOk() (*int64, bool) { +func (o *ResourceLimits) GetNatGatewayLimitTotalOk() (*int32, bool) { if o == nil { return nil, false } - return o.SsdLimitPerVolume, true + return o.NatGatewayLimitTotal, true } -// SetSsdLimitPerVolume sets field value -func (o *ResourceLimits) SetSsdLimitPerVolume(v int64) { +// SetNatGatewayLimitTotal sets field value +func (o *ResourceLimits) SetNatGatewayLimitTotal(v int32) { - o.SsdLimitPerVolume = &v + o.NatGatewayLimitTotal = &v } -// HasSsdLimitPerVolume returns a boolean if a field has been set. -func (o *ResourceLimits) HasSsdLimitPerVolume() bool { - if o != nil && o.SsdLimitPerVolume != nil { +// HasNatGatewayLimitTotal returns a boolean if a field has been set. +func (o *ResourceLimits) HasNatGatewayLimitTotal() bool { + if o != nil && o.NatGatewayLimitTotal != nil { return true } return false } -// GetSsdLimitPerContract returns the SsdLimitPerContract field value -// If the value is explicit nil, the zero value for int64 will be returned -func (o *ResourceLimits) GetSsdLimitPerContract() *int64 { +// GetNatGatewayProvisioned returns the NatGatewayProvisioned field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetNatGatewayProvisioned() *int32 { if o == nil { return nil } - return o.SsdLimitPerContract + return o.NatGatewayProvisioned } -// GetSsdLimitPerContractOk returns a tuple with the SsdLimitPerContract field value +// GetNatGatewayProvisionedOk returns a tuple with the NatGatewayProvisioned field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetSsdLimitPerContractOk() (*int64, bool) { +func (o *ResourceLimits) GetNatGatewayProvisionedOk() (*int32, bool) { if o == nil { return nil, false } - return o.SsdLimitPerContract, true + return o.NatGatewayProvisioned, true } -// SetSsdLimitPerContract sets field value -func (o *ResourceLimits) SetSsdLimitPerContract(v int64) { +// SetNatGatewayProvisioned sets field value +func (o *ResourceLimits) SetNatGatewayProvisioned(v int32) { - o.SsdLimitPerContract = &v + o.NatGatewayProvisioned = &v } -// HasSsdLimitPerContract returns a boolean if a field has been set. -func (o *ResourceLimits) HasSsdLimitPerContract() bool { - if o != nil && o.SsdLimitPerContract != nil { +// HasNatGatewayProvisioned returns a boolean if a field has been set. 
+func (o *ResourceLimits) HasNatGatewayProvisioned() bool { + if o != nil && o.NatGatewayProvisioned != nil { return true } return false } -// GetSsdVolumeProvisioned returns the SsdVolumeProvisioned field value -// If the value is explicit nil, the zero value for int64 will be returned -func (o *ResourceLimits) GetSsdVolumeProvisioned() *int64 { +// GetNlbLimitTotal returns the NlbLimitTotal field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetNlbLimitTotal() *int32 { if o == nil { return nil } - return o.SsdVolumeProvisioned + return o.NlbLimitTotal } -// GetSsdVolumeProvisionedOk returns a tuple with the SsdVolumeProvisioned field value +// GetNlbLimitTotalOk returns a tuple with the NlbLimitTotal field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetSsdVolumeProvisionedOk() (*int64, bool) { +func (o *ResourceLimits) GetNlbLimitTotalOk() (*int32, bool) { if o == nil { return nil, false } - return o.SsdVolumeProvisioned, true + return o.NlbLimitTotal, true } -// SetSsdVolumeProvisioned sets field value -func (o *ResourceLimits) SetSsdVolumeProvisioned(v int64) { +// SetNlbLimitTotal sets field value +func (o *ResourceLimits) SetNlbLimitTotal(v int32) { - o.SsdVolumeProvisioned = &v + o.NlbLimitTotal = &v } -// HasSsdVolumeProvisioned returns a boolean if a field has been set. -func (o *ResourceLimits) HasSsdVolumeProvisioned() bool { - if o != nil && o.SsdVolumeProvisioned != nil { +// HasNlbLimitTotal returns a boolean if a field has been set. +func (o *ResourceLimits) HasNlbLimitTotal() bool { + if o != nil && o.NlbLimitTotal != nil { return true } return false } -// GetDasVolumeProvisioned returns the DasVolumeProvisioned field value -// If the value is explicit nil, the zero value for int64 will be returned -func (o *ResourceLimits) GetDasVolumeProvisioned() *int64 { +// GetNlbProvisioned returns the NlbProvisioned field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetNlbProvisioned() *int32 { if o == nil { return nil } - return o.DasVolumeProvisioned + return o.NlbProvisioned } -// GetDasVolumeProvisionedOk returns a tuple with the DasVolumeProvisioned field value +// GetNlbProvisionedOk returns a tuple with the NlbProvisioned field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetDasVolumeProvisionedOk() (*int64, bool) { +func (o *ResourceLimits) GetNlbProvisionedOk() (*int32, bool) { if o == nil { return nil, false } - return o.DasVolumeProvisioned, true + return o.NlbProvisioned, true } -// SetDasVolumeProvisioned sets field value -func (o *ResourceLimits) SetDasVolumeProvisioned(v int64) { +// SetNlbProvisioned sets field value +func (o *ResourceLimits) SetNlbProvisioned(v int32) { - o.DasVolumeProvisioned = &v + o.NlbProvisioned = &v } -// HasDasVolumeProvisioned returns a boolean if a field has been set. -func (o *ResourceLimits) HasDasVolumeProvisioned() bool { - if o != nil && o.DasVolumeProvisioned != nil { +// HasNlbProvisioned returns a boolean if a field has been set. 
+func (o *ResourceLimits) HasNlbProvisioned() bool { + if o != nil && o.NlbProvisioned != nil { return true } return false } -// GetReservableIps returns the ReservableIps field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetReservableIps() *int32 { +// GetRamPerContract returns the RamPerContract field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetRamPerContract() *int32 { if o == nil { return nil } - return o.ReservableIps + return o.RamPerContract } -// GetReservableIpsOk returns a tuple with the ReservableIps field value +// GetRamPerContractOk returns a tuple with the RamPerContract field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetReservableIpsOk() (*int32, bool) { +func (o *ResourceLimits) GetRamPerContractOk() (*int32, bool) { if o == nil { return nil, false } - return o.ReservableIps, true + return o.RamPerContract, true } -// SetReservableIps sets field value -func (o *ResourceLimits) SetReservableIps(v int32) { +// SetRamPerContract sets field value +func (o *ResourceLimits) SetRamPerContract(v int32) { - o.ReservableIps = &v + o.RamPerContract = &v } -// HasReservableIps returns a boolean if a field has been set. -func (o *ResourceLimits) HasReservableIps() bool { - if o != nil && o.ReservableIps != nil { +// HasRamPerContract returns a boolean if a field has been set. +func (o *ResourceLimits) HasRamPerContract() bool { + if o != nil && o.RamPerContract != nil { return true } return false } -// GetReservedIpsOnContract returns the ReservedIpsOnContract field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetReservedIpsOnContract() *int32 { +// GetRamPerServer returns the RamPerServer field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetRamPerServer() *int32 { if o == nil { return nil } - return o.ReservedIpsOnContract + return o.RamPerServer } -// GetReservedIpsOnContractOk returns a tuple with the ReservedIpsOnContract field value +// GetRamPerServerOk returns a tuple with the RamPerServer field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetReservedIpsOnContractOk() (*int32, bool) { +func (o *ResourceLimits) GetRamPerServerOk() (*int32, bool) { if o == nil { return nil, false } - return o.ReservedIpsOnContract, true + return o.RamPerServer, true } -// SetReservedIpsOnContract sets field value -func (o *ResourceLimits) SetReservedIpsOnContract(v int32) { +// SetRamPerServer sets field value +func (o *ResourceLimits) SetRamPerServer(v int32) { - o.ReservedIpsOnContract = &v + o.RamPerServer = &v } -// HasReservedIpsOnContract returns a boolean if a field has been set. -func (o *ResourceLimits) HasReservedIpsOnContract() bool { - if o != nil && o.ReservedIpsOnContract != nil { +// HasRamPerServer returns a boolean if a field has been set. 
+func (o *ResourceLimits) HasRamPerServer() bool { + if o != nil && o.RamPerServer != nil { return true } return false } -// GetReservedIpsInUse returns the ReservedIpsInUse field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetReservedIpsInUse() *int32 { +// GetRamProvisioned returns the RamProvisioned field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetRamProvisioned() *int32 { if o == nil { return nil } - return o.ReservedIpsInUse + return o.RamProvisioned } -// GetReservedIpsInUseOk returns a tuple with the ReservedIpsInUse field value +// GetRamProvisionedOk returns a tuple with the RamProvisioned field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetReservedIpsInUseOk() (*int32, bool) { +func (o *ResourceLimits) GetRamProvisionedOk() (*int32, bool) { if o == nil { return nil, false } - return o.ReservedIpsInUse, true + return o.RamProvisioned, true } -// SetReservedIpsInUse sets field value -func (o *ResourceLimits) SetReservedIpsInUse(v int32) { +// SetRamProvisioned sets field value +func (o *ResourceLimits) SetRamProvisioned(v int32) { - o.ReservedIpsInUse = &v + o.RamProvisioned = &v } -// HasReservedIpsInUse returns a boolean if a field has been set. -func (o *ResourceLimits) HasReservedIpsInUse() bool { - if o != nil && o.ReservedIpsInUse != nil { +// HasRamProvisioned returns a boolean if a field has been set. +func (o *ResourceLimits) HasRamProvisioned() bool { + if o != nil && o.RamProvisioned != nil { return true } return false } -// GetK8sClusterLimitTotal returns the K8sClusterLimitTotal field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetK8sClusterLimitTotal() *int32 { +// GetReservableIps returns the ReservableIps field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetReservableIps() *int32 { if o == nil { return nil } - return o.K8sClusterLimitTotal + return o.ReservableIps } -// GetK8sClusterLimitTotalOk returns a tuple with the K8sClusterLimitTotal field value +// GetReservableIpsOk returns a tuple with the ReservableIps field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetK8sClusterLimitTotalOk() (*int32, bool) { +func (o *ResourceLimits) GetReservableIpsOk() (*int32, bool) { if o == nil { return nil, false } - return o.K8sClusterLimitTotal, true + return o.ReservableIps, true } -// SetK8sClusterLimitTotal sets field value -func (o *ResourceLimits) SetK8sClusterLimitTotal(v int32) { +// SetReservableIps sets field value +func (o *ResourceLimits) SetReservableIps(v int32) { - o.K8sClusterLimitTotal = &v + o.ReservableIps = &v } -// HasK8sClusterLimitTotal returns a boolean if a field has been set. -func (o *ResourceLimits) HasK8sClusterLimitTotal() bool { - if o != nil && o.K8sClusterLimitTotal != nil { +// HasReservableIps returns a boolean if a field has been set. 
+func (o *ResourceLimits) HasReservableIps() bool { + if o != nil && o.ReservableIps != nil { return true } return false } -// GetK8sClustersProvisioned returns the K8sClustersProvisioned field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetK8sClustersProvisioned() *int32 { +// GetReservedIpsInUse returns the ReservedIpsInUse field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetReservedIpsInUse() *int32 { if o == nil { return nil } - return o.K8sClustersProvisioned + return o.ReservedIpsInUse } -// GetK8sClustersProvisionedOk returns a tuple with the K8sClustersProvisioned field value +// GetReservedIpsInUseOk returns a tuple with the ReservedIpsInUse field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetK8sClustersProvisionedOk() (*int32, bool) { +func (o *ResourceLimits) GetReservedIpsInUseOk() (*int32, bool) { if o == nil { return nil, false } - return o.K8sClustersProvisioned, true + return o.ReservedIpsInUse, true } -// SetK8sClustersProvisioned sets field value -func (o *ResourceLimits) SetK8sClustersProvisioned(v int32) { +// SetReservedIpsInUse sets field value +func (o *ResourceLimits) SetReservedIpsInUse(v int32) { - o.K8sClustersProvisioned = &v + o.ReservedIpsInUse = &v } -// HasK8sClustersProvisioned returns a boolean if a field has been set. -func (o *ResourceLimits) HasK8sClustersProvisioned() bool { - if o != nil && o.K8sClustersProvisioned != nil { +// HasReservedIpsInUse returns a boolean if a field has been set. +func (o *ResourceLimits) HasReservedIpsInUse() bool { + if o != nil && o.ReservedIpsInUse != nil { return true } return false } -// GetNlbLimitTotal returns the NlbLimitTotal field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetNlbLimitTotal() *int32 { +// GetReservedIpsOnContract returns the ReservedIpsOnContract field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetReservedIpsOnContract() *int32 { if o == nil { return nil } - return o.NlbLimitTotal + return o.ReservedIpsOnContract } -// GetNlbLimitTotalOk returns a tuple with the NlbLimitTotal field value +// GetReservedIpsOnContractOk returns a tuple with the ReservedIpsOnContract field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetNlbLimitTotalOk() (*int32, bool) { +func (o *ResourceLimits) GetReservedIpsOnContractOk() (*int32, bool) { if o == nil { return nil, false } - return o.NlbLimitTotal, true + return o.ReservedIpsOnContract, true } -// SetNlbLimitTotal sets field value -func (o *ResourceLimits) SetNlbLimitTotal(v int32) { +// SetReservedIpsOnContract sets field value +func (o *ResourceLimits) SetReservedIpsOnContract(v int32) { - o.NlbLimitTotal = &v + o.ReservedIpsOnContract = &v } -// HasNlbLimitTotal returns a boolean if a field has been set. -func (o *ResourceLimits) HasNlbLimitTotal() bool { - if o != nil && o.NlbLimitTotal != nil { +// HasReservedIpsOnContract returns a boolean if a field has been set. 
+func (o *ResourceLimits) HasReservedIpsOnContract() bool { + if o != nil && o.ReservedIpsOnContract != nil { return true } return false } -// GetNlbProvisioned returns the NlbProvisioned field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetNlbProvisioned() *int32 { +// GetSsdLimitPerContract returns the SsdLimitPerContract field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetSsdLimitPerContract() *int64 { if o == nil { return nil } - return o.NlbProvisioned + return o.SsdLimitPerContract } -// GetNlbProvisionedOk returns a tuple with the NlbProvisioned field value +// GetSsdLimitPerContractOk returns a tuple with the SsdLimitPerContract field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetNlbProvisionedOk() (*int32, bool) { +func (o *ResourceLimits) GetSsdLimitPerContractOk() (*int64, bool) { if o == nil { return nil, false } - return o.NlbProvisioned, true + return o.SsdLimitPerContract, true } -// SetNlbProvisioned sets field value -func (o *ResourceLimits) SetNlbProvisioned(v int32) { +// SetSsdLimitPerContract sets field value +func (o *ResourceLimits) SetSsdLimitPerContract(v int64) { - o.NlbProvisioned = &v + o.SsdLimitPerContract = &v } -// HasNlbProvisioned returns a boolean if a field has been set. -func (o *ResourceLimits) HasNlbProvisioned() bool { - if o != nil && o.NlbProvisioned != nil { +// HasSsdLimitPerContract returns a boolean if a field has been set. +func (o *ResourceLimits) HasSsdLimitPerContract() bool { + if o != nil && o.SsdLimitPerContract != nil { return true } return false } -// GetNatGatewayLimitTotal returns the NatGatewayLimitTotal field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetNatGatewayLimitTotal() *int32 { +// GetSsdLimitPerVolume returns the SsdLimitPerVolume field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetSsdLimitPerVolume() *int64 { if o == nil { return nil } - return o.NatGatewayLimitTotal + return o.SsdLimitPerVolume } -// GetNatGatewayLimitTotalOk returns a tuple with the NatGatewayLimitTotal field value +// GetSsdLimitPerVolumeOk returns a tuple with the SsdLimitPerVolume field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetNatGatewayLimitTotalOk() (*int32, bool) { +func (o *ResourceLimits) GetSsdLimitPerVolumeOk() (*int64, bool) { if o == nil { return nil, false } - return o.NatGatewayLimitTotal, true + return o.SsdLimitPerVolume, true } -// SetNatGatewayLimitTotal sets field value -func (o *ResourceLimits) SetNatGatewayLimitTotal(v int32) { +// SetSsdLimitPerVolume sets field value +func (o *ResourceLimits) SetSsdLimitPerVolume(v int64) { - o.NatGatewayLimitTotal = &v + o.SsdLimitPerVolume = &v } -// HasNatGatewayLimitTotal returns a boolean if a field has been set. -func (o *ResourceLimits) HasNatGatewayLimitTotal() bool { - if o != nil && o.NatGatewayLimitTotal != nil { +// HasSsdLimitPerVolume returns a boolean if a field has been set. 
+func (o *ResourceLimits) HasSsdLimitPerVolume() bool { + if o != nil && o.SsdLimitPerVolume != nil { return true } return false } -// GetNatGatewayProvisioned returns the NatGatewayProvisioned field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ResourceLimits) GetNatGatewayProvisioned() *int32 { +// GetSsdVolumeProvisioned returns the SsdVolumeProvisioned field value +// If the value is explicit nil, nil is returned +func (o *ResourceLimits) GetSsdVolumeProvisioned() *int64 { if o == nil { return nil } - return o.NatGatewayProvisioned + return o.SsdVolumeProvisioned } -// GetNatGatewayProvisionedOk returns a tuple with the NatGatewayProvisioned field value +// GetSsdVolumeProvisionedOk returns a tuple with the SsdVolumeProvisioned field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceLimits) GetNatGatewayProvisionedOk() (*int32, bool) { +func (o *ResourceLimits) GetSsdVolumeProvisionedOk() (*int64, bool) { if o == nil { return nil, false } - return o.NatGatewayProvisioned, true + return o.SsdVolumeProvisioned, true } -// SetNatGatewayProvisioned sets field value -func (o *ResourceLimits) SetNatGatewayProvisioned(v int32) { +// SetSsdVolumeProvisioned sets field value +func (o *ResourceLimits) SetSsdVolumeProvisioned(v int64) { - o.NatGatewayProvisioned = &v + o.SsdVolumeProvisioned = &v } -// HasNatGatewayProvisioned returns a boolean if a field has been set. -func (o *ResourceLimits) HasNatGatewayProvisioned() bool { - if o != nil && o.NatGatewayProvisioned != nil { +// HasSsdVolumeProvisioned returns a boolean if a field has been set. +func (o *ResourceLimits) HasSsdVolumeProvisioned() bool { + if o != nil && o.SsdVolumeProvisioned != nil { return true } @@ -941,72 +941,94 @@ func (o *ResourceLimits) HasNatGatewayProvisioned() bool { func (o ResourceLimits) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.CoresPerServer != nil { - toSerialize["coresPerServer"] = o.CoresPerServer - } if o.CoresPerContract != nil { toSerialize["coresPerContract"] = o.CoresPerContract } + + if o.CoresPerServer != nil { + toSerialize["coresPerServer"] = o.CoresPerServer + } + if o.CoresProvisioned != nil { toSerialize["coresProvisioned"] = o.CoresProvisioned } - if o.RamPerServer != nil { - toSerialize["ramPerServer"] = o.RamPerServer - } - if o.RamPerContract != nil { - toSerialize["ramPerContract"] = o.RamPerContract + + if o.DasVolumeProvisioned != nil { + toSerialize["dasVolumeProvisioned"] = o.DasVolumeProvisioned } - if o.RamProvisioned != nil { - toSerialize["ramProvisioned"] = o.RamProvisioned + + if o.HddLimitPerContract != nil { + toSerialize["hddLimitPerContract"] = o.HddLimitPerContract } + if o.HddLimitPerVolume != nil { toSerialize["hddLimitPerVolume"] = o.HddLimitPerVolume } - if o.HddLimitPerContract != nil { - toSerialize["hddLimitPerContract"] = o.HddLimitPerContract - } + if o.HddVolumeProvisioned != nil { toSerialize["hddVolumeProvisioned"] = o.HddVolumeProvisioned } - if o.SsdLimitPerVolume != nil { - toSerialize["ssdLimitPerVolume"] = o.SsdLimitPerVolume - } - if o.SsdLimitPerContract != nil { - toSerialize["ssdLimitPerContract"] = o.SsdLimitPerContract - } - if o.SsdVolumeProvisioned != nil { - toSerialize["ssdVolumeProvisioned"] = o.SsdVolumeProvisioned - } - if o.DasVolumeProvisioned != nil { - toSerialize["dasVolumeProvisioned"] = o.DasVolumeProvisioned - } - if o.ReservableIps != nil { - 
toSerialize["reservableIps"] = o.ReservableIps - } - if o.ReservedIpsOnContract != nil { - toSerialize["reservedIpsOnContract"] = o.ReservedIpsOnContract - } - if o.ReservedIpsInUse != nil { - toSerialize["reservedIpsInUse"] = o.ReservedIpsInUse - } + if o.K8sClusterLimitTotal != nil { toSerialize["k8sClusterLimitTotal"] = o.K8sClusterLimitTotal } + if o.K8sClustersProvisioned != nil { toSerialize["k8sClustersProvisioned"] = o.K8sClustersProvisioned } + + if o.NatGatewayLimitTotal != nil { + toSerialize["natGatewayLimitTotal"] = o.NatGatewayLimitTotal + } + + if o.NatGatewayProvisioned != nil { + toSerialize["natGatewayProvisioned"] = o.NatGatewayProvisioned + } + if o.NlbLimitTotal != nil { toSerialize["nlbLimitTotal"] = o.NlbLimitTotal } + if o.NlbProvisioned != nil { toSerialize["nlbProvisioned"] = o.NlbProvisioned } - if o.NatGatewayLimitTotal != nil { - toSerialize["natGatewayLimitTotal"] = o.NatGatewayLimitTotal + + if o.RamPerContract != nil { + toSerialize["ramPerContract"] = o.RamPerContract } - if o.NatGatewayProvisioned != nil { - toSerialize["natGatewayProvisioned"] = o.NatGatewayProvisioned + + if o.RamPerServer != nil { + toSerialize["ramPerServer"] = o.RamPerServer } + + if o.RamProvisioned != nil { + toSerialize["ramProvisioned"] = o.RamProvisioned + } + + if o.ReservableIps != nil { + toSerialize["reservableIps"] = o.ReservableIps + } + + if o.ReservedIpsInUse != nil { + toSerialize["reservedIpsInUse"] = o.ReservedIpsInUse + } + + if o.ReservedIpsOnContract != nil { + toSerialize["reservedIpsOnContract"] = o.ReservedIpsOnContract + } + + if o.SsdLimitPerContract != nil { + toSerialize["ssdLimitPerContract"] = o.SsdLimitPerContract + } + + if o.SsdLimitPerVolume != nil { + toSerialize["ssdLimitPerVolume"] = o.SsdLimitPerVolume + } + + if o.SsdVolumeProvisioned != nil { + toSerialize["ssdVolumeProvisioned"] = o.SsdVolumeProvisioned + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_properties.go index b5b274a28..224350ec5 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_properties.go @@ -41,7 +41,7 @@ func NewResourcePropertiesWithDefaults() *ResourceProperties { } // GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ResourceProperties) GetName() *string { if o == nil { return nil @@ -79,7 +79,7 @@ func (o *ResourceProperties) HasName() bool { } // GetSecAuthProtection returns the SecAuthProtection field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *ResourceProperties) GetSecAuthProtection() *bool { if o == nil { return nil @@ -121,9 +121,11 @@ func (o ResourceProperties) MarshalJSON() ([]byte, error) { if o.Name != nil { toSerialize["name"] = o.Name } + if o.SecAuthProtection != nil { toSerialize["secAuthProtection"] = o.SecAuthProtection } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_reference.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_reference.go index 5732ab570..6b58f76a3 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_reference.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_reference.go @@ -16,12 +16,12 @@ import ( // 
ResourceReference struct for ResourceReference type ResourceReference struct { + // URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. Id *string `json:"id"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` } // NewResourceReference instantiates a new ResourceReference object @@ -44,114 +44,114 @@ func NewResourceReferenceWithDefaults() *ResourceReference { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ResourceReference) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *ResourceReference) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceReference) GetIdOk() (*string, bool) { +func (o *ResourceReference) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *ResourceReference) SetId(v string) { +// SetHref sets field value +func (o *ResourceReference) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *ResourceReference) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *ResourceReference) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *ResourceReference) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *ResourceReference) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceReference) GetTypeOk() (*Type, bool) { +func (o *ResourceReference) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *ResourceReference) SetType(v Type) { +// SetId sets field value +func (o *ResourceReference) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *ResourceReference) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. 
+func (o *ResourceReference) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ResourceReference) GetHref() *string { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ResourceReference) GetType() *Type { if o == nil { return nil } - return o.Href + return o.Type } -// GetHrefOk returns a tuple with the Href field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourceReference) GetHrefOk() (*string, bool) { +func (o *ResourceReference) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Type, true } -// SetHref sets field value -func (o *ResourceReference) SetHref(v string) { +// SetType sets field value +func (o *ResourceReference) SetType(v Type) { - o.Href = &v + o.Type = &v } -// HasHref returns a boolean if a field has been set. -func (o *ResourceReference) HasHref() bool { - if o != nil && o.Href != nil { +// HasType returns a boolean if a field has been set. +func (o *ResourceReference) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -160,15 +160,18 @@ func (o *ResourceReference) HasHref() bool { func (o ResourceReference) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + if o.Href != nil { + toSerialize["href"] = o.Href + } + if o.Id != nil { toSerialize["id"] = o.Id } + if o.Type != nil { toSerialize["type"] = o.Type } - if o.Href != nil { - toSerialize["href"] = o.Href - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resources.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resources.go index 36b8beeab..9db504339 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resources.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resources.go @@ -16,14 +16,14 @@ import ( // Resources Collection to represent the resource. type Resources struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of the resource. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Resource `json:"items,omitempty"` + // The type of the resource. + Type *Type `json:"type,omitempty"` } // NewResources instantiates a new Resources object @@ -44,152 +44,152 @@ func NewResourcesWithDefaults() *Resources { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Resources) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Resources) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Resources) GetIdOk() (*string, bool) { +func (o *Resources) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Resources) SetId(v string) { +// SetHref sets field value +func (o *Resources) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Resources) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Resources) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Resources) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Resources) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Resources) GetTypeOk() (*Type, bool) { +func (o *Resources) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Resources) SetType(v Type) { +// SetId sets field value +func (o *Resources) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Resources) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Resources) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Resources) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *Resources) GetItems() *[]Resource { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Resources) GetHrefOk() (*string, bool) { +func (o *Resources) GetItemsOk() (*[]Resource, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *Resources) SetHref(v string) { +// SetItems sets field value +func (o *Resources) SetItems(v []Resource) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *Resources) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. 
+func (o *Resources) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Resource will be returned -func (o *Resources) GetItems() *[]Resource { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Resources) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Resources) GetItemsOk() (*[]Resource, bool) { +func (o *Resources) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *Resources) SetItems(v []Resource) { +// SetType sets field value +func (o *Resources) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *Resources) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *Resources) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *Resources) HasItems() bool { func (o Resources) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resources_users.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resources_users.go index 18de5cf52..e8772006c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resources_users.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resources_users.go @@ -16,14 +16,14 @@ import ( // ResourcesUsers Resources owned by a user. type ResourcesUsers struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of the resource. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Resource `json:"items,omitempty"` + // The type of the resource. + Type *Type `json:"type,omitempty"` } // NewResourcesUsers instantiates a new ResourcesUsers object @@ -44,152 +44,152 @@ func NewResourcesUsersWithDefaults() *ResourcesUsers { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ResourcesUsers) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *ResourcesUsers) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourcesUsers) GetIdOk() (*string, bool) { +func (o *ResourcesUsers) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *ResourcesUsers) SetId(v string) { +// SetHref sets field value +func (o *ResourcesUsers) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *ResourcesUsers) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *ResourcesUsers) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *ResourcesUsers) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *ResourcesUsers) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourcesUsers) GetTypeOk() (*Type, bool) { +func (o *ResourcesUsers) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *ResourcesUsers) SetType(v Type) { +// SetId sets field value +func (o *ResourcesUsers) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *ResourcesUsers) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *ResourcesUsers) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ResourcesUsers) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *ResourcesUsers) GetItems() *[]Resource { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourcesUsers) GetHrefOk() (*string, bool) { +func (o *ResourcesUsers) GetItemsOk() (*[]Resource, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *ResourcesUsers) SetHref(v string) { +// SetItems sets field value +func (o *ResourcesUsers) SetItems(v []Resource) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *ResourcesUsers) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. 
+func (o *ResourcesUsers) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Resource will be returned -func (o *ResourcesUsers) GetItems() *[]Resource { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *ResourcesUsers) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ResourcesUsers) GetItemsOk() (*[]Resource, bool) { +func (o *ResourcesUsers) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *ResourcesUsers) SetItems(v []Resource) { +// SetType sets field value +func (o *ResourcesUsers) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *ResourcesUsers) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *ResourcesUsers) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *ResourcesUsers) HasItems() bool { func (o ResourcesUsers) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_bucket.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_bucket.go index bede84667..5e263873c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_bucket.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_bucket.go @@ -41,7 +41,7 @@ func NewS3BucketWithDefaults() *S3Bucket { } // GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *S3Bucket) GetName() *string { if o == nil { return nil @@ -83,6 +83,7 @@ func (o S3Bucket) MarshalJSON() ([]byte, error) { if o.Name != nil { toSerialize["name"] = o.Name } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key.go index 5febf4192..ae9f00908 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key.go @@ -16,14 +16,14 @@ import ( // S3Key struct for S3Key type S3Key struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of the resource. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *S3KeyMetadata `json:"metadata,omitempty"` Properties *S3KeyProperties `json:"properties"` + // The type of the resource. 
+ Type *Type `json:"type,omitempty"` } // NewS3Key instantiates a new S3Key object @@ -46,190 +46,190 @@ func NewS3KeyWithDefaults() *S3Key { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *S3Key) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *S3Key) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3Key) GetIdOk() (*string, bool) { +func (o *S3Key) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *S3Key) SetId(v string) { +// SetHref sets field value +func (o *S3Key) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *S3Key) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *S3Key) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *S3Key) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *S3Key) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3Key) GetTypeOk() (*Type, bool) { +func (o *S3Key) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *S3Key) SetType(v Type) { +// SetId sets field value +func (o *S3Key) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *S3Key) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *S3Key) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *S3Key) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *S3Key) GetMetadata() *S3KeyMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3Key) GetHrefOk() (*string, bool) { +func (o *S3Key) GetMetadataOk() (*S3KeyMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *S3Key) SetHref(v string) { +// SetMetadata sets field value +func (o *S3Key) SetMetadata(v S3KeyMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *S3Key) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *S3Key) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for S3KeyMetadata will be returned -func (o *S3Key) GetMetadata() *S3KeyMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *S3Key) GetProperties() *S3KeyProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3Key) GetMetadataOk() (*S3KeyMetadata, bool) { +func (o *S3Key) GetPropertiesOk() (*S3KeyProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *S3Key) SetMetadata(v S3KeyMetadata) { +// SetProperties sets field value +func (o *S3Key) SetProperties(v S3KeyProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *S3Key) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *S3Key) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for S3KeyProperties will be returned -func (o *S3Key) GetProperties() *S3KeyProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *S3Key) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3Key) GetPropertiesOk() (*S3KeyProperties, bool) { +func (o *S3Key) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *S3Key) SetProperties(v S3KeyProperties) { +// SetType sets field value +func (o *S3Key) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *S3Key) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *S3Key) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *S3Key) HasProperties() bool { func (o S3Key) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key_metadata.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key_metadata.go index 83ecc8b46..f090555e7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key_metadata.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key_metadata.go @@ -17,10 +17,10 @@ import ( // S3KeyMetadata struct for S3KeyMetadata type S3KeyMetadata struct { - // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. - Etag *string `json:"etag,omitempty"` // The time when the S3 key was created. CreatedDate *IonosTime + // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. + Etag *string `json:"etag,omitempty"` } // NewS3KeyMetadata instantiates a new S3KeyMetadata object @@ -41,83 +41,83 @@ func NewS3KeyMetadataWithDefaults() *S3KeyMetadata { return &this } -// GetEtag returns the Etag field value -// If the value is explicit nil, the zero value for string will be returned -func (o *S3KeyMetadata) GetEtag() *string { +// GetCreatedDate returns the CreatedDate field value +// If the value is explicit nil, nil is returned +func (o *S3KeyMetadata) GetCreatedDate() *time.Time { if o == nil { return nil } - return o.Etag + if o.CreatedDate == nil { + return nil + } + return &o.CreatedDate.Time } -// GetEtagOk returns a tuple with the Etag field value +// GetCreatedDateOk returns a tuple with the CreatedDate field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3KeyMetadata) GetEtagOk() (*string, bool) { +func (o *S3KeyMetadata) GetCreatedDateOk() (*time.Time, bool) { if o == nil { return nil, false } - return o.Etag, true + if o.CreatedDate == nil { + return nil, false + } + return &o.CreatedDate.Time, true + } -// SetEtag sets field value -func (o *S3KeyMetadata) SetEtag(v string) { +// SetCreatedDate sets field value +func (o *S3KeyMetadata) SetCreatedDate(v time.Time) { - o.Etag = &v + o.CreatedDate = &IonosTime{v} } -// HasEtag returns a boolean if a field has been set. -func (o *S3KeyMetadata) HasEtag() bool { - if o != nil && o.Etag != nil { +// HasCreatedDate returns a boolean if a field has been set. 
+func (o *S3KeyMetadata) HasCreatedDate() bool { + if o != nil && o.CreatedDate != nil { return true } return false } -// GetCreatedDate returns the CreatedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned -func (o *S3KeyMetadata) GetCreatedDate() *time.Time { +// GetEtag returns the Etag field value +// If the value is explicit nil, nil is returned +func (o *S3KeyMetadata) GetEtag() *string { if o == nil { return nil } - if o.CreatedDate == nil { - return nil - } - return &o.CreatedDate.Time + return o.Etag } -// GetCreatedDateOk returns a tuple with the CreatedDate field value +// GetEtagOk returns a tuple with the Etag field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3KeyMetadata) GetCreatedDateOk() (*time.Time, bool) { +func (o *S3KeyMetadata) GetEtagOk() (*string, bool) { if o == nil { return nil, false } - if o.CreatedDate == nil { - return nil, false - } - return &o.CreatedDate.Time, true - + return o.Etag, true } -// SetCreatedDate sets field value -func (o *S3KeyMetadata) SetCreatedDate(v time.Time) { +// SetEtag sets field value +func (o *S3KeyMetadata) SetEtag(v string) { - o.CreatedDate = &IonosTime{v} + o.Etag = &v } -// HasCreatedDate returns a boolean if a field has been set. -func (o *S3KeyMetadata) HasCreatedDate() bool { - if o != nil && o.CreatedDate != nil { +// HasEtag returns a boolean if a field has been set. +func (o *S3KeyMetadata) HasEtag() bool { + if o != nil && o.Etag != nil { return true } @@ -126,12 +126,14 @@ func (o *S3KeyMetadata) HasCreatedDate() bool { func (o S3KeyMetadata) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Etag != nil { - toSerialize["etag"] = o.Etag - } if o.CreatedDate != nil { toSerialize["createdDate"] = o.CreatedDate } + + if o.Etag != nil { + toSerialize["etag"] = o.Etag + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key_properties.go index a3204d717..0cf12e771 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_key_properties.go @@ -16,10 +16,10 @@ import ( // S3KeyProperties struct for S3KeyProperties type S3KeyProperties struct { - // Secret of the S3 key. - SecretKey *string `json:"secretKey,omitempty"` // Denotes weather the S3 key is active. Active *bool `json:"active,omitempty"` + // Secret of the S3 key. + SecretKey *string `json:"secretKey,omitempty"` } // NewS3KeyProperties instantiates a new S3KeyProperties object @@ -40,76 +40,76 @@ func NewS3KeyPropertiesWithDefaults() *S3KeyProperties { return &this } -// GetSecretKey returns the SecretKey field value -// If the value is explicit nil, the zero value for string will be returned -func (o *S3KeyProperties) GetSecretKey() *string { +// GetActive returns the Active field value +// If the value is explicit nil, nil is returned +func (o *S3KeyProperties) GetActive() *bool { if o == nil { return nil } - return o.SecretKey + return o.Active } -// GetSecretKeyOk returns a tuple with the SecretKey field value +// GetActiveOk returns a tuple with the Active field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3KeyProperties) GetSecretKeyOk() (*string, bool) { +func (o *S3KeyProperties) GetActiveOk() (*bool, bool) { if o == nil { return nil, false } - return o.SecretKey, true + return o.Active, true } -// SetSecretKey sets field value -func (o *S3KeyProperties) SetSecretKey(v string) { +// SetActive sets field value +func (o *S3KeyProperties) SetActive(v bool) { - o.SecretKey = &v + o.Active = &v } -// HasSecretKey returns a boolean if a field has been set. -func (o *S3KeyProperties) HasSecretKey() bool { - if o != nil && o.SecretKey != nil { +// HasActive returns a boolean if a field has been set. +func (o *S3KeyProperties) HasActive() bool { + if o != nil && o.Active != nil { return true } return false } -// GetActive returns the Active field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *S3KeyProperties) GetActive() *bool { +// GetSecretKey returns the SecretKey field value +// If the value is explicit nil, nil is returned +func (o *S3KeyProperties) GetSecretKey() *string { if o == nil { return nil } - return o.Active + return o.SecretKey } -// GetActiveOk returns a tuple with the Active field value +// GetSecretKeyOk returns a tuple with the SecretKey field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3KeyProperties) GetActiveOk() (*bool, bool) { +func (o *S3KeyProperties) GetSecretKeyOk() (*string, bool) { if o == nil { return nil, false } - return o.Active, true + return o.SecretKey, true } -// SetActive sets field value -func (o *S3KeyProperties) SetActive(v bool) { +// SetSecretKey sets field value +func (o *S3KeyProperties) SetSecretKey(v string) { - o.Active = &v + o.SecretKey = &v } -// HasActive returns a boolean if a field has been set. -func (o *S3KeyProperties) HasActive() bool { - if o != nil && o.Active != nil { +// HasSecretKey returns a boolean if a field has been set. +func (o *S3KeyProperties) HasSecretKey() bool { + if o != nil && o.SecretKey != nil { return true } @@ -118,12 +118,14 @@ func (o *S3KeyProperties) HasActive() bool { func (o S3KeyProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.SecretKey != nil { - toSerialize["secretKey"] = o.SecretKey - } if o.Active != nil { toSerialize["active"] = o.Active } + + if o.SecretKey != nil { + toSerialize["secretKey"] = o.SecretKey + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_keys.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_keys.go index a87bc2f76..d0ed78cc6 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_keys.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_keys.go @@ -16,14 +16,14 @@ import ( // S3Keys struct for S3Keys type S3Keys struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of the resource. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]S3Key `json:"items,omitempty"` + // The type of the resource. 
+ Type *Type `json:"type,omitempty"` } // NewS3Keys instantiates a new S3Keys object @@ -44,152 +44,152 @@ func NewS3KeysWithDefaults() *S3Keys { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *S3Keys) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *S3Keys) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3Keys) GetIdOk() (*string, bool) { +func (o *S3Keys) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *S3Keys) SetId(v string) { +// SetHref sets field value +func (o *S3Keys) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *S3Keys) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *S3Keys) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *S3Keys) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *S3Keys) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3Keys) GetTypeOk() (*Type, bool) { +func (o *S3Keys) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *S3Keys) SetType(v Type) { +// SetId sets field value +func (o *S3Keys) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *S3Keys) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *S3Keys) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *S3Keys) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *S3Keys) GetItems() *[]S3Key { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3Keys) GetHrefOk() (*string, bool) { +func (o *S3Keys) GetItemsOk() (*[]S3Key, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *S3Keys) SetHref(v string) { +// SetItems sets field value +func (o *S3Keys) SetItems(v []S3Key) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *S3Keys) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *S3Keys) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []S3Key will be returned -func (o *S3Keys) GetItems() *[]S3Key { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *S3Keys) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *S3Keys) GetItemsOk() (*[]S3Key, bool) { +func (o *S3Keys) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *S3Keys) SetItems(v []S3Key) { +// SetType sets field value +func (o *S3Keys) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *S3Keys) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *S3Keys) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *S3Keys) HasItems() bool { func (o S3Keys) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_object_storage_sso.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_object_storage_sso.go index 48eb4b166..809410a04 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_object_storage_sso.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_object_storage_sso.go @@ -39,7 +39,7 @@ func NewS3ObjectStorageSSOWithDefaults() *S3ObjectStorageSSO { } // GetSsoUrl returns the SsoUrl field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *S3ObjectStorageSSO) GetSsoUrl() *string { if o == nil { return nil @@ -81,6 +81,7 @@ func (o S3ObjectStorageSSO) MarshalJSON() ([]byte, error) { if o.SsoUrl != nil { toSerialize["ssoUrl"] = o.SsoUrl } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_server.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_server.go index ad75b98a1..4002c4bda 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_server.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_server.go @@ -16,15 +16,15 @@ import ( // Server struct for Server type Server struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Entities *ServerEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. 
+ Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *ServerProperties `json:"properties"` - Entities *ServerEntities `json:"entities,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewServer instantiates a new Server object @@ -47,114 +47,114 @@ func NewServerWithDefaults() *Server { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Server) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *Server) GetEntities() *ServerEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Server) GetIdOk() (*string, bool) { +func (o *Server) GetEntitiesOk() (*ServerEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *Server) SetId(v string) { +// SetEntities sets field value +func (o *Server) SetEntities(v ServerEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *Server) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *Server) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Server) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Server) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Server) GetTypeOk() (*Type, bool) { +func (o *Server) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Server) SetType(v Type) { +// SetHref sets field value +func (o *Server) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Server) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Server) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Server) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Server) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Server) GetHrefOk() (*string, bool) { +func (o *Server) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Server) SetHref(v string) { +// SetId sets field value +func (o *Server) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Server) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Server) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *Server) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned +// If the value is explicit nil, nil is returned func (o *Server) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *Server) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for ServerProperties will be returned +// If the value is explicit nil, nil is returned func (o *Server) GetProperties() *ServerProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *Server) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for ServerEntities will be returned -func (o *Server) GetEntities() *ServerEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Server) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Server) GetEntitiesOk() (*ServerEntities, bool) { +func (o *Server) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *Server) SetEntities(v ServerEntities) { +// SetType sets field value +func (o *Server) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *Server) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Server) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *Server) HasEntities() bool { func (o Server) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_entities.go index fcb42fdb2..a2a8b8912 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_entities.go @@ -17,8 +17,8 @@ import ( // ServerEntities struct for ServerEntities type ServerEntities struct { Cdroms *Cdroms `json:"cdroms,omitempty"` - Volumes *AttachedVolumes `json:"volumes,omitempty"` Nics *Nics `json:"nics,omitempty"` + Volumes *AttachedVolumes `json:"volumes,omitempty"` } // NewServerEntities instantiates a new ServerEntities object @@ -40,7 +40,7 @@ func NewServerEntitiesWithDefaults() *ServerEntities { } // GetCdroms returns the Cdroms field value -// If the value is explicit nil, the zero value for Cdroms will be returned +// If the value is explicit nil, nil is returned func (o *ServerEntities) GetCdroms() *Cdroms { if o == nil { return nil @@ -77,76 +77,76 @@ func (o *ServerEntities) HasCdroms() bool { return false } -// GetVolumes returns the Volumes field value -// If the value is explicit nil, the zero value for AttachedVolumes will be returned -func (o *ServerEntities) GetVolumes() *AttachedVolumes { +// GetNics returns the Nics field value +// If the value is explicit nil, nil is returned +func (o *ServerEntities) GetNics() *Nics { if o == nil { return nil } - return o.Volumes + return o.Nics } -// GetVolumesOk returns a tuple with the Volumes field value +// GetNicsOk returns a tuple with the Nics field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerEntities) GetVolumesOk() (*AttachedVolumes, bool) { +func (o *ServerEntities) GetNicsOk() (*Nics, bool) { if o == nil { return nil, false } - return o.Volumes, true + return o.Nics, true } -// SetVolumes sets field value -func (o *ServerEntities) SetVolumes(v AttachedVolumes) { +// SetNics sets field value +func (o *ServerEntities) SetNics(v Nics) { - o.Volumes = &v + o.Nics = &v } -// HasVolumes returns a boolean if a field has been set. -func (o *ServerEntities) HasVolumes() bool { - if o != nil && o.Volumes != nil { +// HasNics returns a boolean if a field has been set. 
+func (o *ServerEntities) HasNics() bool { + if o != nil && o.Nics != nil { return true } return false } -// GetNics returns the Nics field value -// If the value is explicit nil, the zero value for Nics will be returned -func (o *ServerEntities) GetNics() *Nics { +// GetVolumes returns the Volumes field value +// If the value is explicit nil, nil is returned +func (o *ServerEntities) GetVolumes() *AttachedVolumes { if o == nil { return nil } - return o.Nics + return o.Volumes } -// GetNicsOk returns a tuple with the Nics field value +// GetVolumesOk returns a tuple with the Volumes field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerEntities) GetNicsOk() (*Nics, bool) { +func (o *ServerEntities) GetVolumesOk() (*AttachedVolumes, bool) { if o == nil { return nil, false } - return o.Nics, true + return o.Volumes, true } -// SetNics sets field value -func (o *ServerEntities) SetNics(v Nics) { +// SetVolumes sets field value +func (o *ServerEntities) SetVolumes(v AttachedVolumes) { - o.Nics = &v + o.Volumes = &v } -// HasNics returns a boolean if a field has been set. -func (o *ServerEntities) HasNics() bool { - if o != nil && o.Nics != nil { +// HasVolumes returns a boolean if a field has been set. +func (o *ServerEntities) HasVolumes() bool { + if o != nil && o.Volumes != nil { return true } @@ -158,12 +158,15 @@ func (o ServerEntities) MarshalJSON() ([]byte, error) { if o.Cdroms != nil { toSerialize["cdroms"] = o.Cdroms } - if o.Volumes != nil { - toSerialize["volumes"] = o.Volumes - } + if o.Nics != nil { toSerialize["nics"] = o.Nics } + + if o.Volumes != nil { + toSerialize["volumes"] = o.Volumes + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_properties.go index 4f292cde9..5b81d9551 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_properties.go @@ -16,24 +16,26 @@ import ( // ServerProperties struct for ServerProperties type ServerProperties struct { - // The ID of the template for creating a CUBE server; the available templates for CUBE servers can be found on the templates resource. - TemplateUuid *string `json:"templateUuid,omitempty"` - // The name of the resource. - Name *string `json:"name,omitempty"` + // The availability zone in which the server should be provisioned. + AvailabilityZone *string `json:"availabilityZone,omitempty"` + BootCdrom *ResourceReference `json:"bootCdrom,omitempty"` + BootVolume *ResourceReference `json:"bootVolume,omitempty"` // The total number of cores for the enterprise server. Cores *int32 `json:"cores,omitempty"` - // The memory size for the enterprise server in MB, such as 2048. Size must be specified in multiples of 256 MB with a minimum of 256 MB; however, if you set ramHotPlug to TRUE then you must use a minimum of 1024 MB. If you set the RAM size more than 240GB, then ramHotPlug will be set to FALSE and can not be set to TRUE unless RAM size not set to less than 240GB. - Ram *int32 `json:"ram,omitempty"` - // The availability zone in which the server should be provisioned. - AvailabilityZone *string `json:"availabilityZone,omitempty"` - // Status of the virtual machine. 
- VmState *string `json:"vmState,omitempty"` - BootCdrom *ResourceReference `json:"bootCdrom,omitempty"` - BootVolume *ResourceReference `json:"bootVolume,omitempty"` // CPU architecture on which server gets provisioned; not all CPU architectures are available in all datacenter regions; available CPU architectures can be retrieved from the datacenter resource; must not be provided for CUBE servers. CpuFamily *string `json:"cpuFamily,omitempty"` - // Server type. + // The name of the resource. + Name *string `json:"name,omitempty"` + // The placement group ID that belongs to this server; Requires system privileges + PlacementGroupId *string `json:"placementGroupId,omitempty"` + // The memory size for the enterprise server in MB, such as 2048. Size must be specified in multiples of 256 MB with a minimum of 256 MB; however, if you set ramHotPlug to TRUE then you must use a minimum of 1024 MB. If you set the RAM size more than 240GB, then ramHotPlug will be set to FALSE and can not be set to TRUE unless RAM size not set to less than 240GB. + Ram *int32 `json:"ram,omitempty"` + // The ID of the template for creating a CUBE server; the available templates for CUBE servers can be found on the templates resource. + TemplateUuid *string `json:"templateUuid,omitempty"` + // Server type: CUBE or ENTERPRISE. Type *string `json:"type,omitempty"` + // Status of the virtual machine. + VmState *string `json:"vmState,omitempty"` } // NewServerProperties instantiates a new ServerProperties object @@ -54,342 +56,342 @@ func NewServerPropertiesWithDefaults() *ServerProperties { return &this } -// GetTemplateUuid returns the TemplateUuid field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ServerProperties) GetTemplateUuid() *string { +// GetAvailabilityZone returns the AvailabilityZone field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetAvailabilityZone() *string { if o == nil { return nil } - return o.TemplateUuid + return o.AvailabilityZone } -// GetTemplateUuidOk returns a tuple with the TemplateUuid field value +// GetAvailabilityZoneOk returns a tuple with the AvailabilityZone field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetTemplateUuidOk() (*string, bool) { +func (o *ServerProperties) GetAvailabilityZoneOk() (*string, bool) { if o == nil { return nil, false } - return o.TemplateUuid, true + return o.AvailabilityZone, true } -// SetTemplateUuid sets field value -func (o *ServerProperties) SetTemplateUuid(v string) { +// SetAvailabilityZone sets field value +func (o *ServerProperties) SetAvailabilityZone(v string) { - o.TemplateUuid = &v + o.AvailabilityZone = &v } -// HasTemplateUuid returns a boolean if a field has been set. -func (o *ServerProperties) HasTemplateUuid() bool { - if o != nil && o.TemplateUuid != nil { +// HasAvailabilityZone returns a boolean if a field has been set. 
+func (o *ServerProperties) HasAvailabilityZone() bool { + if o != nil && o.AvailabilityZone != nil { return true } return false } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ServerProperties) GetName() *string { +// GetBootCdrom returns the BootCdrom field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetBootCdrom() *ResourceReference { if o == nil { return nil } - return o.Name + return o.BootCdrom } -// GetNameOk returns a tuple with the Name field value +// GetBootCdromOk returns a tuple with the BootCdrom field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetNameOk() (*string, bool) { +func (o *ServerProperties) GetBootCdromOk() (*ResourceReference, bool) { if o == nil { return nil, false } - return o.Name, true + return o.BootCdrom, true } -// SetName sets field value -func (o *ServerProperties) SetName(v string) { +// SetBootCdrom sets field value +func (o *ServerProperties) SetBootCdrom(v ResourceReference) { - o.Name = &v + o.BootCdrom = &v } -// HasName returns a boolean if a field has been set. -func (o *ServerProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasBootCdrom returns a boolean if a field has been set. +func (o *ServerProperties) HasBootCdrom() bool { + if o != nil && o.BootCdrom != nil { return true } return false } -// GetCores returns the Cores field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ServerProperties) GetCores() *int32 { +// GetBootVolume returns the BootVolume field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetBootVolume() *ResourceReference { if o == nil { return nil } - return o.Cores + return o.BootVolume } -// GetCoresOk returns a tuple with the Cores field value +// GetBootVolumeOk returns a tuple with the BootVolume field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetCoresOk() (*int32, bool) { +func (o *ServerProperties) GetBootVolumeOk() (*ResourceReference, bool) { if o == nil { return nil, false } - return o.Cores, true + return o.BootVolume, true } -// SetCores sets field value -func (o *ServerProperties) SetCores(v int32) { +// SetBootVolume sets field value +func (o *ServerProperties) SetBootVolume(v ResourceReference) { - o.Cores = &v + o.BootVolume = &v } -// HasCores returns a boolean if a field has been set. -func (o *ServerProperties) HasCores() bool { - if o != nil && o.Cores != nil { +// HasBootVolume returns a boolean if a field has been set. +func (o *ServerProperties) HasBootVolume() bool { + if o != nil && o.BootVolume != nil { return true } return false } -// GetRam returns the Ram field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *ServerProperties) GetRam() *int32 { +// GetCores returns the Cores field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetCores() *int32 { if o == nil { return nil } - return o.Ram + return o.Cores } -// GetRamOk returns a tuple with the Ram field value +// GetCoresOk returns a tuple with the Cores field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetRamOk() (*int32, bool) { +func (o *ServerProperties) GetCoresOk() (*int32, bool) { if o == nil { return nil, false } - return o.Ram, true + return o.Cores, true } -// SetRam sets field value -func (o *ServerProperties) SetRam(v int32) { +// SetCores sets field value +func (o *ServerProperties) SetCores(v int32) { - o.Ram = &v + o.Cores = &v } -// HasRam returns a boolean if a field has been set. -func (o *ServerProperties) HasRam() bool { - if o != nil && o.Ram != nil { +// HasCores returns a boolean if a field has been set. +func (o *ServerProperties) HasCores() bool { + if o != nil && o.Cores != nil { return true } return false } -// GetAvailabilityZone returns the AvailabilityZone field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ServerProperties) GetAvailabilityZone() *string { +// GetCpuFamily returns the CpuFamily field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetCpuFamily() *string { if o == nil { return nil } - return o.AvailabilityZone + return o.CpuFamily } -// GetAvailabilityZoneOk returns a tuple with the AvailabilityZone field value +// GetCpuFamilyOk returns a tuple with the CpuFamily field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetAvailabilityZoneOk() (*string, bool) { +func (o *ServerProperties) GetCpuFamilyOk() (*string, bool) { if o == nil { return nil, false } - return o.AvailabilityZone, true + return o.CpuFamily, true } -// SetAvailabilityZone sets field value -func (o *ServerProperties) SetAvailabilityZone(v string) { +// SetCpuFamily sets field value +func (o *ServerProperties) SetCpuFamily(v string) { - o.AvailabilityZone = &v + o.CpuFamily = &v } -// HasAvailabilityZone returns a boolean if a field has been set. -func (o *ServerProperties) HasAvailabilityZone() bool { - if o != nil && o.AvailabilityZone != nil { +// HasCpuFamily returns a boolean if a field has been set. +func (o *ServerProperties) HasCpuFamily() bool { + if o != nil && o.CpuFamily != nil { return true } return false } -// GetVmState returns the VmState field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ServerProperties) GetVmState() *string { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetName() *string { if o == nil { return nil } - return o.VmState + return o.Name } -// GetVmStateOk returns a tuple with the VmState field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetVmStateOk() (*string, bool) { +func (o *ServerProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.VmState, true + return o.Name, true } -// SetVmState sets field value -func (o *ServerProperties) SetVmState(v string) { +// SetName sets field value +func (o *ServerProperties) SetName(v string) { - o.VmState = &v + o.Name = &v } -// HasVmState returns a boolean if a field has been set. -func (o *ServerProperties) HasVmState() bool { - if o != nil && o.VmState != nil { +// HasName returns a boolean if a field has been set. 
+func (o *ServerProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetBootCdrom returns the BootCdrom field value -// If the value is explicit nil, the zero value for ResourceReference will be returned -func (o *ServerProperties) GetBootCdrom() *ResourceReference { +// GetPlacementGroupId returns the PlacementGroupId field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetPlacementGroupId() *string { if o == nil { return nil } - return o.BootCdrom + return o.PlacementGroupId } -// GetBootCdromOk returns a tuple with the BootCdrom field value +// GetPlacementGroupIdOk returns a tuple with the PlacementGroupId field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetBootCdromOk() (*ResourceReference, bool) { +func (o *ServerProperties) GetPlacementGroupIdOk() (*string, bool) { if o == nil { return nil, false } - return o.BootCdrom, true + return o.PlacementGroupId, true } -// SetBootCdrom sets field value -func (o *ServerProperties) SetBootCdrom(v ResourceReference) { +// SetPlacementGroupId sets field value +func (o *ServerProperties) SetPlacementGroupId(v string) { - o.BootCdrom = &v + o.PlacementGroupId = &v } -// HasBootCdrom returns a boolean if a field has been set. -func (o *ServerProperties) HasBootCdrom() bool { - if o != nil && o.BootCdrom != nil { +// HasPlacementGroupId returns a boolean if a field has been set. +func (o *ServerProperties) HasPlacementGroupId() bool { + if o != nil && o.PlacementGroupId != nil { return true } return false } -// GetBootVolume returns the BootVolume field value -// If the value is explicit nil, the zero value for ResourceReference will be returned -func (o *ServerProperties) GetBootVolume() *ResourceReference { +// GetRam returns the Ram field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetRam() *int32 { if o == nil { return nil } - return o.BootVolume + return o.Ram } -// GetBootVolumeOk returns a tuple with the BootVolume field value +// GetRamOk returns a tuple with the Ram field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetBootVolumeOk() (*ResourceReference, bool) { +func (o *ServerProperties) GetRamOk() (*int32, bool) { if o == nil { return nil, false } - return o.BootVolume, true + return o.Ram, true } -// SetBootVolume sets field value -func (o *ServerProperties) SetBootVolume(v ResourceReference) { +// SetRam sets field value +func (o *ServerProperties) SetRam(v int32) { - o.BootVolume = &v + o.Ram = &v } -// HasBootVolume returns a boolean if a field has been set. -func (o *ServerProperties) HasBootVolume() bool { - if o != nil && o.BootVolume != nil { +// HasRam returns a boolean if a field has been set. 
+func (o *ServerProperties) HasRam() bool { + if o != nil && o.Ram != nil { return true } return false } -// GetCpuFamily returns the CpuFamily field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ServerProperties) GetCpuFamily() *string { +// GetTemplateUuid returns the TemplateUuid field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetTemplateUuid() *string { if o == nil { return nil } - return o.CpuFamily + return o.TemplateUuid } -// GetCpuFamilyOk returns a tuple with the CpuFamily field value +// GetTemplateUuidOk returns a tuple with the TemplateUuid field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetCpuFamilyOk() (*string, bool) { +func (o *ServerProperties) GetTemplateUuidOk() (*string, bool) { if o == nil { return nil, false } - return o.CpuFamily, true + return o.TemplateUuid, true } -// SetCpuFamily sets field value -func (o *ServerProperties) SetCpuFamily(v string) { +// SetTemplateUuid sets field value +func (o *ServerProperties) SetTemplateUuid(v string) { - o.CpuFamily = &v + o.TemplateUuid = &v } -// HasCpuFamily returns a boolean if a field has been set. -func (o *ServerProperties) HasCpuFamily() bool { - if o != nil && o.CpuFamily != nil { +// HasTemplateUuid returns a boolean if a field has been set. +func (o *ServerProperties) HasTemplateUuid() bool { + if o != nil && o.TemplateUuid != nil { return true } @@ -397,7 +399,7 @@ func (o *ServerProperties) HasCpuFamily() bool { } // GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *ServerProperties) GetType() *string { if o == nil { return nil @@ -434,38 +436,90 @@ func (o *ServerProperties) HasType() bool { return false } -func (o ServerProperties) MarshalJSON() ([]byte, error) { - toSerialize := map[string]interface{}{} - if o.TemplateUuid != nil { - toSerialize["templateUuid"] = o.TemplateUuid - } - if o.Name != nil { - toSerialize["name"] = o.Name +// GetVmState returns the VmState field value +// If the value is explicit nil, nil is returned +func (o *ServerProperties) GetVmState() *string { + if o == nil { + return nil } - if o.Cores != nil { - toSerialize["cores"] = o.Cores + + return o.VmState + +} + +// GetVmStateOk returns a tuple with the VmState field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *ServerProperties) GetVmStateOk() (*string, bool) { + if o == nil { + return nil, false } - if o.Ram != nil { - toSerialize["ram"] = o.Ram + + return o.VmState, true +} + +// SetVmState sets field value +func (o *ServerProperties) SetVmState(v string) { + + o.VmState = &v + +} + +// HasVmState returns a boolean if a field has been set. 
+func (o *ServerProperties) HasVmState() bool { + if o != nil && o.VmState != nil { + return true } + + return false +} + +func (o ServerProperties) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} if o.AvailabilityZone != nil { toSerialize["availabilityZone"] = o.AvailabilityZone } - if o.VmState != nil { - toSerialize["vmState"] = o.VmState - } + if o.BootCdrom != nil { toSerialize["bootCdrom"] = o.BootCdrom } + if o.BootVolume != nil { toSerialize["bootVolume"] = o.BootVolume } + + if o.Cores != nil { + toSerialize["cores"] = o.Cores + } + if o.CpuFamily != nil { toSerialize["cpuFamily"] = o.CpuFamily } + + if o.Name != nil { + toSerialize["name"] = o.Name + } + + if o.PlacementGroupId != nil { + toSerialize["placementGroupId"] = o.PlacementGroupId + } + + if o.Ram != nil { + toSerialize["ram"] = o.Ram + } + + if o.TemplateUuid != nil { + toSerialize["templateUuid"] = o.TemplateUuid + } + if o.Type != nil { toSerialize["type"] = o.Type } + + if o.VmState != nil { + toSerialize["vmState"] = o.VmState + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_servers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_servers.go index 86fe0d2de..789aeaeb5 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_servers.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_servers.go @@ -16,19 +16,19 @@ import ( // Servers struct for Servers type Servers struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Server `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewServers instantiates a new Servers object @@ -49,114 +49,114 @@ func NewServersWithDefaults() *Servers { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Servers) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *Servers) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Servers) GetIdOk() (*string, bool) { +func (o *Servers) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *Servers) SetId(v string) { +// SetLinks sets field value +func (o *Servers) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *Servers) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. 
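// --- Editor's note: illustrative sketch, not part of the upstream patch ---
// Shows the accessor pattern of the reordered ServerProperties model above:
// setters take plain values, GetXxxOk distinguishes "unset" from a zero
// value, and MarshalJSON only emits fields that were set. Assumes this
// compiles inside the same generated package; the concrete values are
// hypothetical and merely follow the documented RAM rule (multiple of 256 MB).
func exampleServerPropertiesJSON() ([]byte, error) {
	props := NewServerPropertiesWithDefaults()
	props.SetName("example-enterprise-server") // hypothetical name
	props.SetCores(4)
	props.SetRam(2048)                  // multiple of 256 MB, per the field comment above
	props.SetCpuFamily("INTEL_SKYLAKE") // assumed CPU family value, for illustration only

	// Ram was set explicitly, so GetRamOk reports a non-nil pointer and true.
	if ram, ok := props.GetRamOk(); ok && ram != nil {
		_ = *ram
	}

	// templateUuid is CUBE-only and was never set, so HasTemplateUuid is
	// false and the key is omitted from the serialized map.
	_ = props.HasTemplateUuid()

	return props.MarshalJSON()
}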
+func (o *Servers) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Servers) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Servers) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Servers) GetTypeOk() (*Type, bool) { +func (o *Servers) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Servers) SetType(v Type) { +// SetHref sets field value +func (o *Servers) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Servers) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Servers) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Servers) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Servers) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Servers) GetHrefOk() (*string, bool) { +func (o *Servers) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Servers) SetHref(v string) { +// SetId sets field value +func (o *Servers) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Servers) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Servers) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *Servers) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Server will be returned +// If the value is explicit nil, nil is returned func (o *Servers) GetItems() *[]Server { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *Servers) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Servers) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *Servers) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Servers) GetOffsetOk() (*float32, bool) { +func (o *Servers) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *Servers) SetOffset(v float32) { +// SetLimit sets field value +func (o *Servers) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *Servers) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *Servers) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Servers) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *Servers) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Servers) GetLimitOk() (*float32, bool) { +func (o *Servers) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *Servers) SetLimit(v float32) { +// SetOffset sets field value +func (o *Servers) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *Servers) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *Servers) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *Servers) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Servers) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Servers) GetLinksOk() (*PaginationLinks, bool) { +func (o *Servers) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *Servers) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *Servers) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *Servers) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Servers) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *Servers) HasLinks() bool { func (o Servers) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshot.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshot.go index 56ebb960d..c42cc0546 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshot.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshot.go @@ -16,14 +16,14 @@ import ( // Snapshot struct for Snapshot type Snapshot struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *SnapshotProperties `json:"properties"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewSnapshot instantiates a new Snapshot object @@ -46,190 +46,190 @@ func NewSnapshotWithDefaults() *Snapshot { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Snapshot) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Snapshot) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Snapshot) GetIdOk() (*string, bool) { +func (o *Snapshot) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Snapshot) SetId(v string) { +// SetHref sets field value +func (o *Snapshot) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Snapshot) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
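// --- Editor's note: illustrative sketch, not part of the upstream patch ---
// A hedged example of reading the Servers collection page shown above:
// Items, Limit and Offset are optional pointers, so nil means "not sent".
// pageIsFull reports whether the number of returned items equals the
// requested limit, a hint that more pages may follow. Assumed to live in
// the same generated package; only accessors visible in this diff are used.
func pageIsFull(servers Servers) bool {
	items := servers.GetItems()
	if items == nil {
		return false
	}
	limit, ok := servers.GetLimitOk()
	if !ok || limit == nil {
		return false
	}
	return float32(len(*items)) == *limit
}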
+func (o *Snapshot) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Snapshot) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Snapshot) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Snapshot) GetTypeOk() (*Type, bool) { +func (o *Snapshot) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Snapshot) SetType(v Type) { +// SetId sets field value +func (o *Snapshot) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Snapshot) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Snapshot) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Snapshot) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *Snapshot) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Snapshot) GetHrefOk() (*string, bool) { +func (o *Snapshot) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *Snapshot) SetHref(v string) { +// SetMetadata sets field value +func (o *Snapshot) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *Snapshot) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *Snapshot) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *Snapshot) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *Snapshot) GetProperties() *SnapshotProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Snapshot) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *Snapshot) GetPropertiesOk() (*SnapshotProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *Snapshot) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *Snapshot) SetProperties(v SnapshotProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *Snapshot) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *Snapshot) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for SnapshotProperties will be returned -func (o *Snapshot) GetProperties() *SnapshotProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Snapshot) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Snapshot) GetPropertiesOk() (*SnapshotProperties, bool) { +func (o *Snapshot) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *Snapshot) SetProperties(v SnapshotProperties) { +// SetType sets field value +func (o *Snapshot) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *Snapshot) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *Snapshot) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *Snapshot) HasProperties() bool { func (o Snapshot) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshot_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshot_properties.go index b42dca20c..5eb8cea6e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshot_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshot_properties.go @@ -16,38 +16,38 @@ import ( // SnapshotProperties struct for SnapshotProperties type SnapshotProperties struct { - // The name of the resource. - Name *string `json:"name,omitempty"` - // Human-readable description. - Description *string `json:"description,omitempty"` - // Location of that image/snapshot. - Location *string `json:"location,omitempty"` - // The size of the image in GB. 
- Size *float32 `json:"size,omitempty"` - // Boolean value representing if the snapshot requires extra protection, such as two-step verification. - SecAuthProtection *bool `json:"secAuthProtection,omitempty"` // Hot-plug capable CPU (no reboot required). CpuHotPlug *bool `json:"cpuHotPlug,omitempty"` // Hot-unplug capable CPU (no reboot required). CpuHotUnplug *bool `json:"cpuHotUnplug,omitempty"` - // Hot-plug capable RAM (no reboot required). - RamHotPlug *bool `json:"ramHotPlug,omitempty"` - // Hot-unplug capable RAM (no reboot required). - RamHotUnplug *bool `json:"ramHotUnplug,omitempty"` - // Hot-plug capable NIC (no reboot required). - NicHotPlug *bool `json:"nicHotPlug,omitempty"` - // Hot-unplug capable NIC (no reboot required). - NicHotUnplug *bool `json:"nicHotUnplug,omitempty"` - // Hot-plug capable Virt-IO drive (no reboot required). - DiscVirtioHotPlug *bool `json:"discVirtioHotPlug,omitempty"` - // Hot-unplug capable Virt-IO drive (no reboot required). Not supported with Windows VMs. - DiscVirtioHotUnplug *bool `json:"discVirtioHotUnplug,omitempty"` + // Human-readable description. + Description *string `json:"description,omitempty"` // Hot-plug capable SCSI drive (no reboot required). DiscScsiHotPlug *bool `json:"discScsiHotPlug,omitempty"` // Is capable of SCSI drive hot unplug (no reboot required). This works only for non-Windows virtual Machines. DiscScsiHotUnplug *bool `json:"discScsiHotUnplug,omitempty"` + // Hot-plug capable Virt-IO drive (no reboot required). + DiscVirtioHotPlug *bool `json:"discVirtioHotPlug,omitempty"` + // Hot-unplug capable Virt-IO drive (no reboot required). Not supported with Windows VMs. + DiscVirtioHotUnplug *bool `json:"discVirtioHotUnplug,omitempty"` // OS type of this snapshot LicenceType *string `json:"licenceType,omitempty"` + // Location of that image/snapshot. + Location *string `json:"location,omitempty"` + // The name of the resource. + Name *string `json:"name,omitempty"` + // Hot-plug capable NIC (no reboot required). + NicHotPlug *bool `json:"nicHotPlug,omitempty"` + // Hot-unplug capable NIC (no reboot required). + NicHotUnplug *bool `json:"nicHotUnplug,omitempty"` + // Hot-plug capable RAM (no reboot required). + RamHotPlug *bool `json:"ramHotPlug,omitempty"` + // Hot-unplug capable RAM (no reboot required). + RamHotUnplug *bool `json:"ramHotUnplug,omitempty"` + // Boolean value representing if the snapshot requires extra protection, such as two-step verification. + SecAuthProtection *bool `json:"secAuthProtection,omitempty"` + // The size of the image in GB. + Size *float32 `json:"size,omitempty"` } // NewSnapshotProperties instantiates a new SnapshotProperties object @@ -68,38 +68,76 @@ func NewSnapshotPropertiesWithDefaults() *SnapshotProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *SnapshotProperties) GetName() *string { +// GetCpuHotPlug returns the CpuHotPlug field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetCpuHotPlug() *bool { if o == nil { return nil } - return o.Name + return o.CpuHotPlug } -// GetNameOk returns a tuple with the Name field value +// GetCpuHotPlugOk returns a tuple with the CpuHotPlug field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetNameOk() (*string, bool) { +func (o *SnapshotProperties) GetCpuHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.Name, true + return o.CpuHotPlug, true } -// SetName sets field value -func (o *SnapshotProperties) SetName(v string) { +// SetCpuHotPlug sets field value +func (o *SnapshotProperties) SetCpuHotPlug(v bool) { - o.Name = &v + o.CpuHotPlug = &v } -// HasName returns a boolean if a field has been set. -func (o *SnapshotProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasCpuHotPlug returns a boolean if a field has been set. +func (o *SnapshotProperties) HasCpuHotPlug() bool { + if o != nil && o.CpuHotPlug != nil { + return true + } + + return false +} + +// GetCpuHotUnplug returns the CpuHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetCpuHotUnplug() *bool { + if o == nil { + return nil + } + + return o.CpuHotUnplug + +} + +// GetCpuHotUnplugOk returns a tuple with the CpuHotUnplug field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *SnapshotProperties) GetCpuHotUnplugOk() (*bool, bool) { + if o == nil { + return nil, false + } + + return o.CpuHotUnplug, true +} + +// SetCpuHotUnplug sets field value +func (o *SnapshotProperties) SetCpuHotUnplug(v bool) { + + o.CpuHotUnplug = &v + +} + +// HasCpuHotUnplug returns a boolean if a field has been set. +func (o *SnapshotProperties) HasCpuHotUnplug() bool { + if o != nil && o.CpuHotUnplug != nil { return true } @@ -107,7 +145,7 @@ func (o *SnapshotProperties) HasName() bool { } // GetDescription returns the Description field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *SnapshotProperties) GetDescription() *string { if o == nil { return nil @@ -144,266 +182,266 @@ func (o *SnapshotProperties) HasDescription() bool { return false } -// GetLocation returns the Location field value -// If the value is explicit nil, the zero value for string will be returned -func (o *SnapshotProperties) GetLocation() *string { +// GetDiscScsiHotPlug returns the DiscScsiHotPlug field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetDiscScsiHotPlug() *bool { if o == nil { return nil } - return o.Location + return o.DiscScsiHotPlug } -// GetLocationOk returns a tuple with the Location field value +// GetDiscScsiHotPlugOk returns a tuple with the DiscScsiHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetLocationOk() (*string, bool) { +func (o *SnapshotProperties) GetDiscScsiHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.Location, true + return o.DiscScsiHotPlug, true } -// SetLocation sets field value -func (o *SnapshotProperties) SetLocation(v string) { +// SetDiscScsiHotPlug sets field value +func (o *SnapshotProperties) SetDiscScsiHotPlug(v bool) { - o.Location = &v + o.DiscScsiHotPlug = &v } -// HasLocation returns a boolean if a field has been set. -func (o *SnapshotProperties) HasLocation() bool { - if o != nil && o.Location != nil { +// HasDiscScsiHotPlug returns a boolean if a field has been set. 
+func (o *SnapshotProperties) HasDiscScsiHotPlug() bool { + if o != nil && o.DiscScsiHotPlug != nil { return true } return false } -// GetSize returns the Size field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *SnapshotProperties) GetSize() *float32 { +// GetDiscScsiHotUnplug returns the DiscScsiHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetDiscScsiHotUnplug() *bool { if o == nil { return nil } - return o.Size + return o.DiscScsiHotUnplug } -// GetSizeOk returns a tuple with the Size field value +// GetDiscScsiHotUnplugOk returns a tuple with the DiscScsiHotUnplug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetSizeOk() (*float32, bool) { +func (o *SnapshotProperties) GetDiscScsiHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.Size, true + return o.DiscScsiHotUnplug, true } -// SetSize sets field value -func (o *SnapshotProperties) SetSize(v float32) { +// SetDiscScsiHotUnplug sets field value +func (o *SnapshotProperties) SetDiscScsiHotUnplug(v bool) { - o.Size = &v + o.DiscScsiHotUnplug = &v } -// HasSize returns a boolean if a field has been set. -func (o *SnapshotProperties) HasSize() bool { - if o != nil && o.Size != nil { +// HasDiscScsiHotUnplug returns a boolean if a field has been set. +func (o *SnapshotProperties) HasDiscScsiHotUnplug() bool { + if o != nil && o.DiscScsiHotUnplug != nil { return true } return false } -// GetSecAuthProtection returns the SecAuthProtection field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *SnapshotProperties) GetSecAuthProtection() *bool { +// GetDiscVirtioHotPlug returns the DiscVirtioHotPlug field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetDiscVirtioHotPlug() *bool { if o == nil { return nil } - return o.SecAuthProtection + return o.DiscVirtioHotPlug } -// GetSecAuthProtectionOk returns a tuple with the SecAuthProtection field value +// GetDiscVirtioHotPlugOk returns a tuple with the DiscVirtioHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetSecAuthProtectionOk() (*bool, bool) { +func (o *SnapshotProperties) GetDiscVirtioHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.SecAuthProtection, true + return o.DiscVirtioHotPlug, true } -// SetSecAuthProtection sets field value -func (o *SnapshotProperties) SetSecAuthProtection(v bool) { +// SetDiscVirtioHotPlug sets field value +func (o *SnapshotProperties) SetDiscVirtioHotPlug(v bool) { - o.SecAuthProtection = &v + o.DiscVirtioHotPlug = &v } -// HasSecAuthProtection returns a boolean if a field has been set. -func (o *SnapshotProperties) HasSecAuthProtection() bool { - if o != nil && o.SecAuthProtection != nil { +// HasDiscVirtioHotPlug returns a boolean if a field has been set. 
+func (o *SnapshotProperties) HasDiscVirtioHotPlug() bool { + if o != nil && o.DiscVirtioHotPlug != nil { return true } return false } -// GetCpuHotPlug returns the CpuHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *SnapshotProperties) GetCpuHotPlug() *bool { +// GetDiscVirtioHotUnplug returns the DiscVirtioHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetDiscVirtioHotUnplug() *bool { if o == nil { return nil } - return o.CpuHotPlug + return o.DiscVirtioHotUnplug } -// GetCpuHotPlugOk returns a tuple with the CpuHotPlug field value +// GetDiscVirtioHotUnplugOk returns a tuple with the DiscVirtioHotUnplug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetCpuHotPlugOk() (*bool, bool) { +func (o *SnapshotProperties) GetDiscVirtioHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.CpuHotPlug, true + return o.DiscVirtioHotUnplug, true } -// SetCpuHotPlug sets field value -func (o *SnapshotProperties) SetCpuHotPlug(v bool) { +// SetDiscVirtioHotUnplug sets field value +func (o *SnapshotProperties) SetDiscVirtioHotUnplug(v bool) { - o.CpuHotPlug = &v + o.DiscVirtioHotUnplug = &v } -// HasCpuHotPlug returns a boolean if a field has been set. -func (o *SnapshotProperties) HasCpuHotPlug() bool { - if o != nil && o.CpuHotPlug != nil { +// HasDiscVirtioHotUnplug returns a boolean if a field has been set. +func (o *SnapshotProperties) HasDiscVirtioHotUnplug() bool { + if o != nil && o.DiscVirtioHotUnplug != nil { return true } return false } -// GetCpuHotUnplug returns the CpuHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *SnapshotProperties) GetCpuHotUnplug() *bool { +// GetLicenceType returns the LicenceType field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetLicenceType() *string { if o == nil { return nil } - return o.CpuHotUnplug + return o.LicenceType } -// GetCpuHotUnplugOk returns a tuple with the CpuHotUnplug field value +// GetLicenceTypeOk returns a tuple with the LicenceType field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetCpuHotUnplugOk() (*bool, bool) { +func (o *SnapshotProperties) GetLicenceTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.CpuHotUnplug, true + return o.LicenceType, true } -// SetCpuHotUnplug sets field value -func (o *SnapshotProperties) SetCpuHotUnplug(v bool) { +// SetLicenceType sets field value +func (o *SnapshotProperties) SetLicenceType(v string) { - o.CpuHotUnplug = &v + o.LicenceType = &v } -// HasCpuHotUnplug returns a boolean if a field has been set. -func (o *SnapshotProperties) HasCpuHotUnplug() bool { - if o != nil && o.CpuHotUnplug != nil { +// HasLicenceType returns a boolean if a field has been set. 
+func (o *SnapshotProperties) HasLicenceType() bool { + if o != nil && o.LicenceType != nil { return true } return false } -// GetRamHotPlug returns the RamHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *SnapshotProperties) GetRamHotPlug() *bool { +// GetLocation returns the Location field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetLocation() *string { if o == nil { return nil } - return o.RamHotPlug + return o.Location } -// GetRamHotPlugOk returns a tuple with the RamHotPlug field value +// GetLocationOk returns a tuple with the Location field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetRamHotPlugOk() (*bool, bool) { +func (o *SnapshotProperties) GetLocationOk() (*string, bool) { if o == nil { return nil, false } - return o.RamHotPlug, true + return o.Location, true } -// SetRamHotPlug sets field value -func (o *SnapshotProperties) SetRamHotPlug(v bool) { +// SetLocation sets field value +func (o *SnapshotProperties) SetLocation(v string) { - o.RamHotPlug = &v + o.Location = &v } -// HasRamHotPlug returns a boolean if a field has been set. -func (o *SnapshotProperties) HasRamHotPlug() bool { - if o != nil && o.RamHotPlug != nil { +// HasLocation returns a boolean if a field has been set. +func (o *SnapshotProperties) HasLocation() bool { + if o != nil && o.Location != nil { return true } return false } -// GetRamHotUnplug returns the RamHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *SnapshotProperties) GetRamHotUnplug() *bool { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetName() *string { if o == nil { return nil } - return o.RamHotUnplug + return o.Name } -// GetRamHotUnplugOk returns a tuple with the RamHotUnplug field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetRamHotUnplugOk() (*bool, bool) { +func (o *SnapshotProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.RamHotUnplug, true + return o.Name, true } -// SetRamHotUnplug sets field value -func (o *SnapshotProperties) SetRamHotUnplug(v bool) { +// SetName sets field value +func (o *SnapshotProperties) SetName(v string) { - o.RamHotUnplug = &v + o.Name = &v } -// HasRamHotUnplug returns a boolean if a field has been set. -func (o *SnapshotProperties) HasRamHotUnplug() bool { - if o != nil && o.RamHotUnplug != nil { +// HasName returns a boolean if a field has been set. 
+func (o *SnapshotProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -411,7 +449,7 @@ func (o *SnapshotProperties) HasRamHotUnplug() bool { } // GetNicHotPlug returns the NicHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *SnapshotProperties) GetNicHotPlug() *bool { if o == nil { return nil @@ -449,7 +487,7 @@ func (o *SnapshotProperties) HasNicHotPlug() bool { } // GetNicHotUnplug returns the NicHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *SnapshotProperties) GetNicHotUnplug() *bool { if o == nil { return nil @@ -486,246 +524,224 @@ func (o *SnapshotProperties) HasNicHotUnplug() bool { return false } -// GetDiscVirtioHotPlug returns the DiscVirtioHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *SnapshotProperties) GetDiscVirtioHotPlug() *bool { +// GetRamHotPlug returns the RamHotPlug field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetRamHotPlug() *bool { if o == nil { return nil } - return o.DiscVirtioHotPlug + return o.RamHotPlug } -// GetDiscVirtioHotPlugOk returns a tuple with the DiscVirtioHotPlug field value +// GetRamHotPlugOk returns a tuple with the RamHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetDiscVirtioHotPlugOk() (*bool, bool) { +func (o *SnapshotProperties) GetRamHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.DiscVirtioHotPlug, true + return o.RamHotPlug, true } -// SetDiscVirtioHotPlug sets field value -func (o *SnapshotProperties) SetDiscVirtioHotPlug(v bool) { +// SetRamHotPlug sets field value +func (o *SnapshotProperties) SetRamHotPlug(v bool) { - o.DiscVirtioHotPlug = &v + o.RamHotPlug = &v } -// HasDiscVirtioHotPlug returns a boolean if a field has been set. -func (o *SnapshotProperties) HasDiscVirtioHotPlug() bool { - if o != nil && o.DiscVirtioHotPlug != nil { +// HasRamHotPlug returns a boolean if a field has been set. +func (o *SnapshotProperties) HasRamHotPlug() bool { + if o != nil && o.RamHotPlug != nil { return true } return false } -// GetDiscVirtioHotUnplug returns the DiscVirtioHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *SnapshotProperties) GetDiscVirtioHotUnplug() *bool { +// GetRamHotUnplug returns the RamHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetRamHotUnplug() *bool { if o == nil { return nil } - return o.DiscVirtioHotUnplug + return o.RamHotUnplug } -// GetDiscVirtioHotUnplugOk returns a tuple with the DiscVirtioHotUnplug field value +// GetRamHotUnplugOk returns a tuple with the RamHotUnplug field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetDiscVirtioHotUnplugOk() (*bool, bool) { +func (o *SnapshotProperties) GetRamHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.DiscVirtioHotUnplug, true + return o.RamHotUnplug, true } -// SetDiscVirtioHotUnplug sets field value -func (o *SnapshotProperties) SetDiscVirtioHotUnplug(v bool) { +// SetRamHotUnplug sets field value +func (o *SnapshotProperties) SetRamHotUnplug(v bool) { - o.DiscVirtioHotUnplug = &v + o.RamHotUnplug = &v } -// HasDiscVirtioHotUnplug returns a boolean if a field has been set. -func (o *SnapshotProperties) HasDiscVirtioHotUnplug() bool { - if o != nil && o.DiscVirtioHotUnplug != nil { +// HasRamHotUnplug returns a boolean if a field has been set. +func (o *SnapshotProperties) HasRamHotUnplug() bool { + if o != nil && o.RamHotUnplug != nil { return true } return false } -// GetDiscScsiHotPlug returns the DiscScsiHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *SnapshotProperties) GetDiscScsiHotPlug() *bool { +// GetSecAuthProtection returns the SecAuthProtection field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetSecAuthProtection() *bool { if o == nil { return nil } - return o.DiscScsiHotPlug + return o.SecAuthProtection } -// GetDiscScsiHotPlugOk returns a tuple with the DiscScsiHotPlug field value +// GetSecAuthProtectionOk returns a tuple with the SecAuthProtection field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetDiscScsiHotPlugOk() (*bool, bool) { +func (o *SnapshotProperties) GetSecAuthProtectionOk() (*bool, bool) { if o == nil { return nil, false } - return o.DiscScsiHotPlug, true + return o.SecAuthProtection, true } -// SetDiscScsiHotPlug sets field value -func (o *SnapshotProperties) SetDiscScsiHotPlug(v bool) { +// SetSecAuthProtection sets field value +func (o *SnapshotProperties) SetSecAuthProtection(v bool) { - o.DiscScsiHotPlug = &v + o.SecAuthProtection = &v } -// HasDiscScsiHotPlug returns a boolean if a field has been set. -func (o *SnapshotProperties) HasDiscScsiHotPlug() bool { - if o != nil && o.DiscScsiHotPlug != nil { +// HasSecAuthProtection returns a boolean if a field has been set. +func (o *SnapshotProperties) HasSecAuthProtection() bool { + if o != nil && o.SecAuthProtection != nil { return true } return false } -// GetDiscScsiHotUnplug returns the DiscScsiHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *SnapshotProperties) GetDiscScsiHotUnplug() *bool { +// GetSize returns the Size field value +// If the value is explicit nil, nil is returned +func (o *SnapshotProperties) GetSize() *float32 { if o == nil { return nil } - return o.DiscScsiHotUnplug + return o.Size } -// GetDiscScsiHotUnplugOk returns a tuple with the DiscScsiHotUnplug field value +// GetSizeOk returns a tuple with the Size field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetDiscScsiHotUnplugOk() (*bool, bool) { +func (o *SnapshotProperties) GetSizeOk() (*float32, bool) { if o == nil { return nil, false } - return o.DiscScsiHotUnplug, true + return o.Size, true } -// SetDiscScsiHotUnplug sets field value -func (o *SnapshotProperties) SetDiscScsiHotUnplug(v bool) { +// SetSize sets field value +func (o *SnapshotProperties) SetSize(v float32) { - o.DiscScsiHotUnplug = &v + o.Size = &v } -// HasDiscScsiHotUnplug returns a boolean if a field has been set. -func (o *SnapshotProperties) HasDiscScsiHotUnplug() bool { - if o != nil && o.DiscScsiHotUnplug != nil { +// HasSize returns a boolean if a field has been set. +func (o *SnapshotProperties) HasSize() bool { + if o != nil && o.Size != nil { return true } return false } -// GetLicenceType returns the LicenceType field value -// If the value is explicit nil, the zero value for string will be returned -func (o *SnapshotProperties) GetLicenceType() *string { - if o == nil { - return nil +func (o SnapshotProperties) MarshalJSON() ([]byte, error) { + toSerialize := map[string]interface{}{} + if o.CpuHotPlug != nil { + toSerialize["cpuHotPlug"] = o.CpuHotPlug } - return o.LicenceType - -} - -// GetLicenceTypeOk returns a tuple with the LicenceType field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *SnapshotProperties) GetLicenceTypeOk() (*string, bool) { - if o == nil { - return nil, false + if o.CpuHotUnplug != nil { + toSerialize["cpuHotUnplug"] = o.CpuHotUnplug } - return o.LicenceType, true -} - -// SetLicenceType sets field value -func (o *SnapshotProperties) SetLicenceType(v string) { - - o.LicenceType = &v + if o.Description != nil { + toSerialize["description"] = o.Description + } -} + if o.DiscScsiHotPlug != nil { + toSerialize["discScsiHotPlug"] = o.DiscScsiHotPlug + } -// HasLicenceType returns a boolean if a field has been set. 
-func (o *SnapshotProperties) HasLicenceType() bool { - if o != nil && o.LicenceType != nil { - return true + if o.DiscScsiHotUnplug != nil { + toSerialize["discScsiHotUnplug"] = o.DiscScsiHotUnplug } - return false -} + if o.DiscVirtioHotPlug != nil { + toSerialize["discVirtioHotPlug"] = o.DiscVirtioHotPlug + } -func (o SnapshotProperties) MarshalJSON() ([]byte, error) { - toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.DiscVirtioHotUnplug != nil { + toSerialize["discVirtioHotUnplug"] = o.DiscVirtioHotUnplug } - if o.Description != nil { - toSerialize["description"] = o.Description + + if o.LicenceType != nil { + toSerialize["licenceType"] = o.LicenceType } + if o.Location != nil { toSerialize["location"] = o.Location } - if o.Size != nil { - toSerialize["size"] = o.Size - } - if o.SecAuthProtection != nil { - toSerialize["secAuthProtection"] = o.SecAuthProtection - } - if o.CpuHotPlug != nil { - toSerialize["cpuHotPlug"] = o.CpuHotPlug - } - if o.CpuHotUnplug != nil { - toSerialize["cpuHotUnplug"] = o.CpuHotUnplug - } - if o.RamHotPlug != nil { - toSerialize["ramHotPlug"] = o.RamHotPlug - } - if o.RamHotUnplug != nil { - toSerialize["ramHotUnplug"] = o.RamHotUnplug + + if o.Name != nil { + toSerialize["name"] = o.Name } + if o.NicHotPlug != nil { toSerialize["nicHotPlug"] = o.NicHotPlug } + if o.NicHotUnplug != nil { toSerialize["nicHotUnplug"] = o.NicHotUnplug } - if o.DiscVirtioHotPlug != nil { - toSerialize["discVirtioHotPlug"] = o.DiscVirtioHotPlug - } - if o.DiscVirtioHotUnplug != nil { - toSerialize["discVirtioHotUnplug"] = o.DiscVirtioHotUnplug + + if o.RamHotPlug != nil { + toSerialize["ramHotPlug"] = o.RamHotPlug } - if o.DiscScsiHotPlug != nil { - toSerialize["discScsiHotPlug"] = o.DiscScsiHotPlug + + if o.RamHotUnplug != nil { + toSerialize["ramHotUnplug"] = o.RamHotUnplug } - if o.DiscScsiHotUnplug != nil { - toSerialize["discScsiHotUnplug"] = o.DiscScsiHotUnplug + + if o.SecAuthProtection != nil { + toSerialize["secAuthProtection"] = o.SecAuthProtection } - if o.LicenceType != nil { - toSerialize["licenceType"] = o.LicenceType + + if o.Size != nil { + toSerialize["size"] = o.Size } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshots.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshots.go index 8578d03dd..313d03e09 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshots.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_snapshots.go @@ -16,14 +16,14 @@ import ( // Snapshots struct for Snapshots type Snapshots struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Snapshot `json:"items,omitempty"` + // The type of object that has been created. 
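// --- Editor's note: illustrative sketch, not part of the upstream patch ---
// Minimal use of the reordered SnapshotProperties model, under the same
// in-package assumption; only accessors visible in this diff are used, and
// the values are hypothetical. Fields left unset stay nil and are therefore
// dropped by MarshalJSON.
func exampleSnapshotPropertiesJSON() ([]byte, error) {
	props := NewSnapshotPropertiesWithDefaults()
	props.SetName("nightly-backup") // hypothetical snapshot name
	props.SetSize(10)               // size in GB, per the field comment
	props.SetLicenceType("LINUX")   // assumed licence value, for illustration
	props.SetRamHotPlug(true)

	// Unset fields (e.g. secAuthProtection) do not appear in the output map.
	return props.MarshalJSON()
}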
+ Type *Type `json:"type,omitempty"` } // NewSnapshots instantiates a new Snapshots object @@ -44,152 +44,152 @@ func NewSnapshotsWithDefaults() *Snapshots { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Snapshots) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Snapshots) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Snapshots) GetIdOk() (*string, bool) { +func (o *Snapshots) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Snapshots) SetId(v string) { +// SetHref sets field value +func (o *Snapshots) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Snapshots) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Snapshots) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Snapshots) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Snapshots) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Snapshots) GetTypeOk() (*Type, bool) { +func (o *Snapshots) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Snapshots) SetType(v Type) { +// SetId sets field value +func (o *Snapshots) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Snapshots) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Snapshots) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Snapshots) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *Snapshots) GetItems() *[]Snapshot { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Snapshots) GetHrefOk() (*string, bool) { +func (o *Snapshots) GetItemsOk() (*[]Snapshot, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *Snapshots) SetHref(v string) { +// SetItems sets field value +func (o *Snapshots) SetItems(v []Snapshot) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *Snapshots) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *Snapshots) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Snapshot will be returned -func (o *Snapshots) GetItems() *[]Snapshot { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Snapshots) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Snapshots) GetItemsOk() (*[]Snapshot, bool) { +func (o *Snapshots) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *Snapshots) SetItems(v []Snapshot) { +// SetType sets field value +func (o *Snapshots) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. -func (o *Snapshots) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *Snapshots) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *Snapshots) HasItems() bool { func (o Snapshots) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group.go index 95d71efea..cbfb0271f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group.go @@ -16,14 +16,14 @@ import ( // TargetGroup struct for TargetGroup type TargetGroup struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *TargetGroupProperties `json:"properties"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewTargetGroup instantiates a new TargetGroup object @@ -46,190 +46,190 @@ func NewTargetGroupWithDefaults() *TargetGroup { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroup) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *TargetGroup) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroup) GetIdOk() (*string, bool) { +func (o *TargetGroup) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *TargetGroup) SetId(v string) { +// SetHref sets field value +func (o *TargetGroup) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *TargetGroup) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *TargetGroup) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *TargetGroup) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *TargetGroup) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroup) GetTypeOk() (*Type, bool) { +func (o *TargetGroup) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *TargetGroup) SetType(v Type) { +// SetId sets field value +func (o *TargetGroup) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *TargetGroup) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *TargetGroup) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroup) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *TargetGroup) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroup) GetHrefOk() (*string, bool) { +func (o *TargetGroup) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *TargetGroup) SetHref(v string) { +// SetMetadata sets field value +func (o *TargetGroup) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *TargetGroup) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *TargetGroup) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *TargetGroup) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *TargetGroup) GetProperties() *TargetGroupProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroup) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *TargetGroup) GetPropertiesOk() (*TargetGroupProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *TargetGroup) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *TargetGroup) SetProperties(v TargetGroupProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *TargetGroup) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *TargetGroup) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for TargetGroupProperties will be returned -func (o *TargetGroup) GetProperties() *TargetGroupProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *TargetGroup) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroup) GetPropertiesOk() (*TargetGroupProperties, bool) { +func (o *TargetGroup) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *TargetGroup) SetProperties(v TargetGroupProperties) { +// SetType sets field value +func (o *TargetGroup) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. 
-func (o *TargetGroup) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *TargetGroup) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *TargetGroup) HasProperties() bool { func (o TargetGroup) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_health_check.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_health_check.go index 5c8dfdfc6..70ca3dc63 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_health_check.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_health_check.go @@ -16,10 +16,10 @@ import ( // TargetGroupHealthCheck struct for TargetGroupHealthCheck type TargetGroupHealthCheck struct { - // The maximum time in milliseconds is to wait for a target to respond to a check. For target VMs with a 'Check Interval' set, the smaller of the two values is used once the TCP connection is established. - CheckTimeout *int32 `json:"checkTimeout,omitempty"` // The interval in milliseconds between consecutive health checks; the default value is '2000'. CheckInterval *int32 `json:"checkInterval,omitempty"` + // The maximum time in milliseconds is to wait for a target to respond to a check. For target VMs with a 'Check Interval' set, the smaller of the two values is used once the TCP connection is established. + CheckTimeout *int32 `json:"checkTimeout,omitempty"` // The maximum number of attempts to reconnect to a target after a connection failure. The valid range is '0 to 65535'; the default value is '3'. Retries *int32 `json:"retries,omitempty"` } @@ -42,76 +42,76 @@ func NewTargetGroupHealthCheckWithDefaults() *TargetGroupHealthCheck { return &this } -// GetCheckTimeout returns the CheckTimeout field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *TargetGroupHealthCheck) GetCheckTimeout() *int32 { +// GetCheckInterval returns the CheckInterval field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupHealthCheck) GetCheckInterval() *int32 { if o == nil { return nil } - return o.CheckTimeout + return o.CheckInterval } -// GetCheckTimeoutOk returns a tuple with the CheckTimeout field value +// GetCheckIntervalOk returns a tuple with the CheckInterval field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupHealthCheck) GetCheckTimeoutOk() (*int32, bool) { +func (o *TargetGroupHealthCheck) GetCheckIntervalOk() (*int32, bool) { if o == nil { return nil, false } - return o.CheckTimeout, true + return o.CheckInterval, true } -// SetCheckTimeout sets field value -func (o *TargetGroupHealthCheck) SetCheckTimeout(v int32) { +// SetCheckInterval sets field value +func (o *TargetGroupHealthCheck) SetCheckInterval(v int32) { - o.CheckTimeout = &v + o.CheckInterval = &v } -// HasCheckTimeout returns a boolean if a field has been set. 
-func (o *TargetGroupHealthCheck) HasCheckTimeout() bool { - if o != nil && o.CheckTimeout != nil { +// HasCheckInterval returns a boolean if a field has been set. +func (o *TargetGroupHealthCheck) HasCheckInterval() bool { + if o != nil && o.CheckInterval != nil { return true } return false } -// GetCheckInterval returns the CheckInterval field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *TargetGroupHealthCheck) GetCheckInterval() *int32 { +// GetCheckTimeout returns the CheckTimeout field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupHealthCheck) GetCheckTimeout() *int32 { if o == nil { return nil } - return o.CheckInterval + return o.CheckTimeout } -// GetCheckIntervalOk returns a tuple with the CheckInterval field value +// GetCheckTimeoutOk returns a tuple with the CheckTimeout field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupHealthCheck) GetCheckIntervalOk() (*int32, bool) { +func (o *TargetGroupHealthCheck) GetCheckTimeoutOk() (*int32, bool) { if o == nil { return nil, false } - return o.CheckInterval, true + return o.CheckTimeout, true } -// SetCheckInterval sets field value -func (o *TargetGroupHealthCheck) SetCheckInterval(v int32) { +// SetCheckTimeout sets field value +func (o *TargetGroupHealthCheck) SetCheckTimeout(v int32) { - o.CheckInterval = &v + o.CheckTimeout = &v } -// HasCheckInterval returns a boolean if a field has been set. -func (o *TargetGroupHealthCheck) HasCheckInterval() bool { - if o != nil && o.CheckInterval != nil { +// HasCheckTimeout returns a boolean if a field has been set. +func (o *TargetGroupHealthCheck) HasCheckTimeout() bool { + if o != nil && o.CheckTimeout != nil { return true } @@ -119,7 +119,7 @@ func (o *TargetGroupHealthCheck) HasCheckInterval() bool { } // GetRetries returns the Retries field value -// If the value is explicit nil, the zero value for int32 will be returned +// If the value is explicit nil, nil is returned func (o *TargetGroupHealthCheck) GetRetries() *int32 { if o == nil { return nil @@ -158,15 +158,18 @@ func (o *TargetGroupHealthCheck) HasRetries() bool { func (o TargetGroupHealthCheck) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.CheckTimeout != nil { - toSerialize["checkTimeout"] = o.CheckTimeout - } if o.CheckInterval != nil { toSerialize["checkInterval"] = o.CheckInterval } + + if o.CheckTimeout != nil { + toSerialize["checkTimeout"] = o.CheckTimeout + } + if o.Retries != nil { toSerialize["retries"] = o.Retries } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_http_health_check.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_http_health_check.go index 274f7b911..dc31f4950 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_http_health_check.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_http_health_check.go @@ -16,18 +16,18 @@ import ( // TargetGroupHttpHealthCheck struct for TargetGroupHttpHealthCheck type TargetGroupHttpHealthCheck struct { - // The destination URL for HTTP the health check; the default is '/'. - Path *string `json:"path,omitempty"` - // The method used for the health check request. - Method *string `json:"method,omitempty"` // Specify the target's response type to match ALB's request. MatchType *string `json:"matchType"` - // The response returned by the request. 
It can be a status code or a response body depending on the definition of 'matchType'. - Response *string `json:"response"` - // Specifies whether to use a regular expression to parse the response body; the default value is 'FALSE'. By using regular expressions, you can flexibly customize the expected response from a healthy server. - Regex *bool `json:"regex,omitempty"` + // The method used for the health check request. + Method *string `json:"method,omitempty"` // Specifies whether to negate an individual entry; the default value is 'FALSE'. Negate *bool `json:"negate,omitempty"` + // The destination URL for HTTP the health check; the default is '/'. + Path *string `json:"path,omitempty"` + // Specifies whether to use a regular expression to parse the response body; the default value is 'FALSE'. By using regular expressions, you can flexibly customize the expected response from a healthy server. + Regex *bool `json:"regex,omitempty"` + // The response returned by the request. It can be a status code or a response body depending on the definition of 'matchType'. + Response *string `json:"response"` } // NewTargetGroupHttpHealthCheck instantiates a new TargetGroupHttpHealthCheck object @@ -51,38 +51,38 @@ func NewTargetGroupHttpHealthCheckWithDefaults() *TargetGroupHttpHealthCheck { return &this } -// GetPath returns the Path field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroupHttpHealthCheck) GetPath() *string { +// GetMatchType returns the MatchType field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupHttpHealthCheck) GetMatchType() *string { if o == nil { return nil } - return o.Path + return o.MatchType } -// GetPathOk returns a tuple with the Path field value +// GetMatchTypeOk returns a tuple with the MatchType field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupHttpHealthCheck) GetPathOk() (*string, bool) { +func (o *TargetGroupHttpHealthCheck) GetMatchTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.Path, true + return o.MatchType, true } -// SetPath sets field value -func (o *TargetGroupHttpHealthCheck) SetPath(v string) { +// SetMatchType sets field value +func (o *TargetGroupHttpHealthCheck) SetMatchType(v string) { - o.Path = &v + o.MatchType = &v } -// HasPath returns a boolean if a field has been set. -func (o *TargetGroupHttpHealthCheck) HasPath() bool { - if o != nil && o.Path != nil { +// HasMatchType returns a boolean if a field has been set. 
+func (o *TargetGroupHttpHealthCheck) HasMatchType() bool { + if o != nil && o.MatchType != nil { return true } @@ -90,7 +90,7 @@ func (o *TargetGroupHttpHealthCheck) HasPath() bool { } // GetMethod returns the Method field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *TargetGroupHttpHealthCheck) GetMethod() *string { if o == nil { return nil @@ -127,76 +127,76 @@ func (o *TargetGroupHttpHealthCheck) HasMethod() bool { return false } -// GetMatchType returns the MatchType field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroupHttpHealthCheck) GetMatchType() *string { +// GetNegate returns the Negate field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupHttpHealthCheck) GetNegate() *bool { if o == nil { return nil } - return o.MatchType + return o.Negate } -// GetMatchTypeOk returns a tuple with the MatchType field value +// GetNegateOk returns a tuple with the Negate field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupHttpHealthCheck) GetMatchTypeOk() (*string, bool) { +func (o *TargetGroupHttpHealthCheck) GetNegateOk() (*bool, bool) { if o == nil { return nil, false } - return o.MatchType, true + return o.Negate, true } -// SetMatchType sets field value -func (o *TargetGroupHttpHealthCheck) SetMatchType(v string) { +// SetNegate sets field value +func (o *TargetGroupHttpHealthCheck) SetNegate(v bool) { - o.MatchType = &v + o.Negate = &v } -// HasMatchType returns a boolean if a field has been set. -func (o *TargetGroupHttpHealthCheck) HasMatchType() bool { - if o != nil && o.MatchType != nil { +// HasNegate returns a boolean if a field has been set. +func (o *TargetGroupHttpHealthCheck) HasNegate() bool { + if o != nil && o.Negate != nil { return true } return false } -// GetResponse returns the Response field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroupHttpHealthCheck) GetResponse() *string { +// GetPath returns the Path field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupHttpHealthCheck) GetPath() *string { if o == nil { return nil } - return o.Response + return o.Path } -// GetResponseOk returns a tuple with the Response field value +// GetPathOk returns a tuple with the Path field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupHttpHealthCheck) GetResponseOk() (*string, bool) { +func (o *TargetGroupHttpHealthCheck) GetPathOk() (*string, bool) { if o == nil { return nil, false } - return o.Response, true + return o.Path, true } -// SetResponse sets field value -func (o *TargetGroupHttpHealthCheck) SetResponse(v string) { +// SetPath sets field value +func (o *TargetGroupHttpHealthCheck) SetPath(v string) { - o.Response = &v + o.Path = &v } -// HasResponse returns a boolean if a field has been set. -func (o *TargetGroupHttpHealthCheck) HasResponse() bool { - if o != nil && o.Response != nil { +// HasPath returns a boolean if a field has been set. 
+func (o *TargetGroupHttpHealthCheck) HasPath() bool { + if o != nil && o.Path != nil { return true } @@ -204,7 +204,7 @@ func (o *TargetGroupHttpHealthCheck) HasResponse() bool { } // GetRegex returns the Regex field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *TargetGroupHttpHealthCheck) GetRegex() *bool { if o == nil { return nil @@ -241,38 +241,38 @@ func (o *TargetGroupHttpHealthCheck) HasRegex() bool { return false } -// GetNegate returns the Negate field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *TargetGroupHttpHealthCheck) GetNegate() *bool { +// GetResponse returns the Response field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupHttpHealthCheck) GetResponse() *string { if o == nil { return nil } - return o.Negate + return o.Response } -// GetNegateOk returns a tuple with the Negate field value +// GetResponseOk returns a tuple with the Response field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupHttpHealthCheck) GetNegateOk() (*bool, bool) { +func (o *TargetGroupHttpHealthCheck) GetResponseOk() (*string, bool) { if o == nil { return nil, false } - return o.Negate, true + return o.Response, true } -// SetNegate sets field value -func (o *TargetGroupHttpHealthCheck) SetNegate(v bool) { +// SetResponse sets field value +func (o *TargetGroupHttpHealthCheck) SetResponse(v string) { - o.Negate = &v + o.Response = &v } -// HasNegate returns a boolean if a field has been set. -func (o *TargetGroupHttpHealthCheck) HasNegate() bool { - if o != nil && o.Negate != nil { +// HasResponse returns a boolean if a field has been set. +func (o *TargetGroupHttpHealthCheck) HasResponse() bool { + if o != nil && o.Response != nil { return true } @@ -281,24 +281,30 @@ func (o *TargetGroupHttpHealthCheck) HasNegate() bool { func (o TargetGroupHttpHealthCheck) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Path != nil { - toSerialize["path"] = o.Path + if o.MatchType != nil { + toSerialize["matchType"] = o.MatchType } + if o.Method != nil { toSerialize["method"] = o.Method } - if o.MatchType != nil { - toSerialize["matchType"] = o.MatchType + + if o.Negate != nil { + toSerialize["negate"] = o.Negate } - if o.Response != nil { - toSerialize["response"] = o.Response + + if o.Path != nil { + toSerialize["path"] = o.Path } + if o.Regex != nil { toSerialize["regex"] = o.Regex } - if o.Negate != nil { - toSerialize["negate"] = o.Negate + + if o.Response != nil { + toSerialize["response"] = o.Response } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_properties.go index 3a79b8bc7..f14f3486f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_properties.go @@ -16,27 +16,27 @@ import ( // TargetGroupProperties struct for TargetGroupProperties type TargetGroupProperties struct { + // The balancing algorithm. A balancing algorithm consists of predefined rules with the logic that a load balancer uses to distribute network traffic between servers. - **Round Robin**: Targets are served alternately according to their weighting. 
- **Least Connection**: The target with the least active connection is served. - **Random**: The targets are served based on a consistent pseudorandom algorithm. - **Source IP**: It is ensured that the same client IP address reaches the same target. + Algorithm *string `json:"algorithm"` + HealthCheck *TargetGroupHealthCheck `json:"healthCheck,omitempty"` + HttpHealthCheck *TargetGroupHttpHealthCheck `json:"httpHealthCheck,omitempty"` // The target group name. Name *string `json:"name"` - // The balancing algorithm. A balancing algorithm consists of predefined rules with the logic that a load balancer uses to distribute network traffic between servers. - **Round Robin**: Targets are served alternately according to their weighting. - **Least Connection**: The target with the least active connection is served. - **Random**: The targets are served based on a consistent pseudorandom algorithm. - **Source IP**: It is ensured that the same client IP address reaches the same target. - Algorithm *string `json:"algorithm"` // The forwarding protocol. Only the value 'HTTP' is allowed. Protocol *string `json:"protocol"` // Array of items in the collection. - Targets *[]TargetGroupTarget `json:"targets,omitempty"` - HealthCheck *TargetGroupHealthCheck `json:"healthCheck,omitempty"` - HttpHealthCheck *TargetGroupHttpHealthCheck `json:"httpHealthCheck,omitempty"` + Targets *[]TargetGroupTarget `json:"targets,omitempty"` } // NewTargetGroupProperties instantiates a new TargetGroupProperties object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewTargetGroupProperties(name string, algorithm string, protocol string) *TargetGroupProperties { +func NewTargetGroupProperties(algorithm string, name string, protocol string) *TargetGroupProperties { this := TargetGroupProperties{} - this.Name = &name this.Algorithm = &algorithm + this.Name = &name this.Protocol = &protocol return &this @@ -50,228 +50,228 @@ func NewTargetGroupPropertiesWithDefaults() *TargetGroupProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroupProperties) GetName() *string { +// GetAlgorithm returns the Algorithm field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupProperties) GetAlgorithm() *string { if o == nil { return nil } - return o.Name + return o.Algorithm } -// GetNameOk returns a tuple with the Name field value +// GetAlgorithmOk returns a tuple with the Algorithm field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupProperties) GetNameOk() (*string, bool) { +func (o *TargetGroupProperties) GetAlgorithmOk() (*string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Algorithm, true } -// SetName sets field value -func (o *TargetGroupProperties) SetName(v string) { +// SetAlgorithm sets field value +func (o *TargetGroupProperties) SetAlgorithm(v string) { - o.Name = &v + o.Algorithm = &v } -// HasName returns a boolean if a field has been set. -func (o *TargetGroupProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasAlgorithm returns a boolean if a field has been set. 
+func (o *TargetGroupProperties) HasAlgorithm() bool { + if o != nil && o.Algorithm != nil { return true } return false } -// GetAlgorithm returns the Algorithm field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroupProperties) GetAlgorithm() *string { +// GetHealthCheck returns the HealthCheck field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupProperties) GetHealthCheck() *TargetGroupHealthCheck { if o == nil { return nil } - return o.Algorithm + return o.HealthCheck } -// GetAlgorithmOk returns a tuple with the Algorithm field value +// GetHealthCheckOk returns a tuple with the HealthCheck field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupProperties) GetAlgorithmOk() (*string, bool) { +func (o *TargetGroupProperties) GetHealthCheckOk() (*TargetGroupHealthCheck, bool) { if o == nil { return nil, false } - return o.Algorithm, true + return o.HealthCheck, true } -// SetAlgorithm sets field value -func (o *TargetGroupProperties) SetAlgorithm(v string) { +// SetHealthCheck sets field value +func (o *TargetGroupProperties) SetHealthCheck(v TargetGroupHealthCheck) { - o.Algorithm = &v + o.HealthCheck = &v } -// HasAlgorithm returns a boolean if a field has been set. -func (o *TargetGroupProperties) HasAlgorithm() bool { - if o != nil && o.Algorithm != nil { +// HasHealthCheck returns a boolean if a field has been set. +func (o *TargetGroupProperties) HasHealthCheck() bool { + if o != nil && o.HealthCheck != nil { return true } return false } -// GetProtocol returns the Protocol field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroupProperties) GetProtocol() *string { +// GetHttpHealthCheck returns the HttpHealthCheck field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupProperties) GetHttpHealthCheck() *TargetGroupHttpHealthCheck { if o == nil { return nil } - return o.Protocol + return o.HttpHealthCheck } -// GetProtocolOk returns a tuple with the Protocol field value +// GetHttpHealthCheckOk returns a tuple with the HttpHealthCheck field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupProperties) GetProtocolOk() (*string, bool) { +func (o *TargetGroupProperties) GetHttpHealthCheckOk() (*TargetGroupHttpHealthCheck, bool) { if o == nil { return nil, false } - return o.Protocol, true + return o.HttpHealthCheck, true } -// SetProtocol sets field value -func (o *TargetGroupProperties) SetProtocol(v string) { +// SetHttpHealthCheck sets field value +func (o *TargetGroupProperties) SetHttpHealthCheck(v TargetGroupHttpHealthCheck) { - o.Protocol = &v + o.HttpHealthCheck = &v } -// HasProtocol returns a boolean if a field has been set. -func (o *TargetGroupProperties) HasProtocol() bool { - if o != nil && o.Protocol != nil { +// HasHttpHealthCheck returns a boolean if a field has been set. 
+func (o *TargetGroupProperties) HasHttpHealthCheck() bool { + if o != nil && o.HttpHealthCheck != nil { return true } return false } -// GetTargets returns the Targets field value -// If the value is explicit nil, the zero value for []TargetGroupTarget will be returned -func (o *TargetGroupProperties) GetTargets() *[]TargetGroupTarget { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupProperties) GetName() *string { if o == nil { return nil } - return o.Targets + return o.Name } -// GetTargetsOk returns a tuple with the Targets field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupProperties) GetTargetsOk() (*[]TargetGroupTarget, bool) { +func (o *TargetGroupProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Targets, true + return o.Name, true } -// SetTargets sets field value -func (o *TargetGroupProperties) SetTargets(v []TargetGroupTarget) { +// SetName sets field value +func (o *TargetGroupProperties) SetName(v string) { - o.Targets = &v + o.Name = &v } -// HasTargets returns a boolean if a field has been set. -func (o *TargetGroupProperties) HasTargets() bool { - if o != nil && o.Targets != nil { +// HasName returns a boolean if a field has been set. +func (o *TargetGroupProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetHealthCheck returns the HealthCheck field value -// If the value is explicit nil, the zero value for TargetGroupHealthCheck will be returned -func (o *TargetGroupProperties) GetHealthCheck() *TargetGroupHealthCheck { +// GetProtocol returns the Protocol field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupProperties) GetProtocol() *string { if o == nil { return nil } - return o.HealthCheck + return o.Protocol } -// GetHealthCheckOk returns a tuple with the HealthCheck field value +// GetProtocolOk returns a tuple with the Protocol field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupProperties) GetHealthCheckOk() (*TargetGroupHealthCheck, bool) { +func (o *TargetGroupProperties) GetProtocolOk() (*string, bool) { if o == nil { return nil, false } - return o.HealthCheck, true + return o.Protocol, true } -// SetHealthCheck sets field value -func (o *TargetGroupProperties) SetHealthCheck(v TargetGroupHealthCheck) { +// SetProtocol sets field value +func (o *TargetGroupProperties) SetProtocol(v string) { - o.HealthCheck = &v + o.Protocol = &v } -// HasHealthCheck returns a boolean if a field has been set. -func (o *TargetGroupProperties) HasHealthCheck() bool { - if o != nil && o.HealthCheck != nil { +// HasProtocol returns a boolean if a field has been set. 
+func (o *TargetGroupProperties) HasProtocol() bool { + if o != nil && o.Protocol != nil { return true } return false } -// GetHttpHealthCheck returns the HttpHealthCheck field value -// If the value is explicit nil, the zero value for TargetGroupHttpHealthCheck will be returned -func (o *TargetGroupProperties) GetHttpHealthCheck() *TargetGroupHttpHealthCheck { +// GetTargets returns the Targets field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupProperties) GetTargets() *[]TargetGroupTarget { if o == nil { return nil } - return o.HttpHealthCheck + return o.Targets } -// GetHttpHealthCheckOk returns a tuple with the HttpHealthCheck field value +// GetTargetsOk returns a tuple with the Targets field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupProperties) GetHttpHealthCheckOk() (*TargetGroupHttpHealthCheck, bool) { +func (o *TargetGroupProperties) GetTargetsOk() (*[]TargetGroupTarget, bool) { if o == nil { return nil, false } - return o.HttpHealthCheck, true + return o.Targets, true } -// SetHttpHealthCheck sets field value -func (o *TargetGroupProperties) SetHttpHealthCheck(v TargetGroupHttpHealthCheck) { +// SetTargets sets field value +func (o *TargetGroupProperties) SetTargets(v []TargetGroupTarget) { - o.HttpHealthCheck = &v + o.Targets = &v } -// HasHttpHealthCheck returns a boolean if a field has been set. -func (o *TargetGroupProperties) HasHttpHealthCheck() bool { - if o != nil && o.HttpHealthCheck != nil { +// HasTargets returns a boolean if a field has been set. +func (o *TargetGroupProperties) HasTargets() bool { + if o != nil && o.Targets != nil { return true } @@ -280,24 +280,30 @@ func (o *TargetGroupProperties) HasHttpHealthCheck() bool { func (o TargetGroupProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } if o.Algorithm != nil { toSerialize["algorithm"] = o.Algorithm } - if o.Protocol != nil { - toSerialize["protocol"] = o.Protocol - } - if o.Targets != nil { - toSerialize["targets"] = o.Targets - } + if o.HealthCheck != nil { toSerialize["healthCheck"] = o.HealthCheck } + if o.HttpHealthCheck != nil { toSerialize["httpHealthCheck"] = o.HttpHealthCheck } + + if o.Name != nil { + toSerialize["name"] = o.Name + } + + if o.Protocol != nil { + toSerialize["protocol"] = o.Protocol + } + + if o.Targets != nil { + toSerialize["targets"] = o.Targets + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_put.go index e5f5afd56..b5f93b565 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_put.go @@ -16,13 +16,13 @@ import ( // TargetGroupPut struct for TargetGroupPut type TargetGroupPut struct { + // The URL to the object representation (absolute path). + Href *string `json:"href,omitempty"` // The resource's unique identifier. - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty"` + Properties *TargetGroupProperties `json:"properties"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // The URL to the object representation (absolute path). 
- Href *string `json:"href,omitempty"` - Properties *TargetGroupProperties `json:"properties"` } // NewTargetGroupPut instantiates a new TargetGroupPut object @@ -45,152 +45,152 @@ func NewTargetGroupPutWithDefaults() *TargetGroupPut { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroupPut) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupPut) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupPut) GetIdOk() (*string, bool) { +func (o *TargetGroupPut) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *TargetGroupPut) SetId(v string) { +// SetHref sets field value +func (o *TargetGroupPut) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *TargetGroupPut) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *TargetGroupPut) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *TargetGroupPut) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupPut) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupPut) GetTypeOk() (*Type, bool) { +func (o *TargetGroupPut) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *TargetGroupPut) SetType(v Type) { +// SetId sets field value +func (o *TargetGroupPut) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *TargetGroupPut) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *TargetGroupPut) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroupPut) GetHref() *string { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupPut) GetProperties() *TargetGroupProperties { if o == nil { return nil } - return o.Href + return o.Properties } -// GetHrefOk returns a tuple with the Href field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupPut) GetHrefOk() (*string, bool) { +func (o *TargetGroupPut) GetPropertiesOk() (*TargetGroupProperties, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Properties, true } -// SetHref sets field value -func (o *TargetGroupPut) SetHref(v string) { +// SetProperties sets field value +func (o *TargetGroupPut) SetProperties(v TargetGroupProperties) { - o.Href = &v + o.Properties = &v } -// HasHref returns a boolean if a field has been set. -func (o *TargetGroupPut) HasHref() bool { - if o != nil && o.Href != nil { +// HasProperties returns a boolean if a field has been set. +func (o *TargetGroupPut) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for TargetGroupProperties will be returned -func (o *TargetGroupPut) GetProperties() *TargetGroupProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupPut) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupPut) GetPropertiesOk() (*TargetGroupProperties, bool) { +func (o *TargetGroupPut) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *TargetGroupPut) SetProperties(v TargetGroupProperties) { +// SetType sets field value +func (o *TargetGroupPut) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *TargetGroupPut) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *TargetGroupPut) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -199,18 +199,22 @@ func (o *TargetGroupPut) HasProperties() bool { func (o TargetGroupPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_target.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_target.go index bad8c42d5..a9c9e2d9e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_target.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_target.go @@ -16,16 +16,16 @@ import ( // TargetGroupTarget struct for TargetGroupTarget type TargetGroupTarget struct { + // When the health check is enabled, the target is available only when it accepts regular TCP or HTTP connection attempts for state checking. The state check consists of one connection attempt with the target's address and port. The default value is 'TRUE'. + HealthCheckEnabled *bool `json:"healthCheckEnabled,omitempty"` // The IP address of the balanced target. 
Ip *string `json:"ip"` + // When the maintenance mode is enabled, the target is prevented from receiving traffic; the default value is 'FALSE'. + MaintenanceEnabled *bool `json:"maintenanceEnabled,omitempty"` // The port of the balanced target service; the valid range is 1 to 65535. Port *int32 `json:"port"` // The traffic is distributed proportionally to target weight, which is the ratio of the total weight of all targets. A target with higher weight receives a larger share of traffic. The valid range is from 0 to 256; the default value is '1'. Targets with a weight of '0' do not participate in load balancing but still accept persistent connections. We recommend using values in the middle range to leave room for later adjustments. Weight *int32 `json:"weight"` - // When the health check is enabled, the target is available only when it accepts regular TCP or HTTP connection attempts for state checking. The state check consists of one connection attempt with the target's address and port. The default value is 'TRUE'. - HealthCheckEnabled *bool `json:"healthCheckEnabled,omitempty"` - // When the maintenance mode is enabled, the target is prevented from receiving traffic; the default value is 'FALSE'. - MaintenanceEnabled *bool `json:"maintenanceEnabled,omitempty"` } // NewTargetGroupTarget instantiates a new TargetGroupTarget object @@ -50,190 +50,190 @@ func NewTargetGroupTargetWithDefaults() *TargetGroupTarget { return &this } -// GetIp returns the Ip field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroupTarget) GetIp() *string { +// GetHealthCheckEnabled returns the HealthCheckEnabled field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupTarget) GetHealthCheckEnabled() *bool { if o == nil { return nil } - return o.Ip + return o.HealthCheckEnabled } -// GetIpOk returns a tuple with the Ip field value +// GetHealthCheckEnabledOk returns a tuple with the HealthCheckEnabled field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupTarget) GetIpOk() (*string, bool) { +func (o *TargetGroupTarget) GetHealthCheckEnabledOk() (*bool, bool) { if o == nil { return nil, false } - return o.Ip, true + return o.HealthCheckEnabled, true } -// SetIp sets field value -func (o *TargetGroupTarget) SetIp(v string) { +// SetHealthCheckEnabled sets field value +func (o *TargetGroupTarget) SetHealthCheckEnabled(v bool) { - o.Ip = &v + o.HealthCheckEnabled = &v } -// HasIp returns a boolean if a field has been set. -func (o *TargetGroupTarget) HasIp() bool { - if o != nil && o.Ip != nil { +// HasHealthCheckEnabled returns a boolean if a field has been set. +func (o *TargetGroupTarget) HasHealthCheckEnabled() bool { + if o != nil && o.HealthCheckEnabled != nil { return true } return false } -// GetPort returns the Port field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *TargetGroupTarget) GetPort() *int32 { +// GetIp returns the Ip field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupTarget) GetIp() *string { if o == nil { return nil } - return o.Port + return o.Ip } -// GetPortOk returns a tuple with the Port field value +// GetIpOk returns a tuple with the Ip field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupTarget) GetPortOk() (*int32, bool) { +func (o *TargetGroupTarget) GetIpOk() (*string, bool) { if o == nil { return nil, false } - return o.Port, true + return o.Ip, true } -// SetPort sets field value -func (o *TargetGroupTarget) SetPort(v int32) { +// SetIp sets field value +func (o *TargetGroupTarget) SetIp(v string) { - o.Port = &v + o.Ip = &v } -// HasPort returns a boolean if a field has been set. -func (o *TargetGroupTarget) HasPort() bool { - if o != nil && o.Port != nil { +// HasIp returns a boolean if a field has been set. +func (o *TargetGroupTarget) HasIp() bool { + if o != nil && o.Ip != nil { return true } return false } -// GetWeight returns the Weight field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *TargetGroupTarget) GetWeight() *int32 { +// GetMaintenanceEnabled returns the MaintenanceEnabled field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupTarget) GetMaintenanceEnabled() *bool { if o == nil { return nil } - return o.Weight + return o.MaintenanceEnabled } -// GetWeightOk returns a tuple with the Weight field value +// GetMaintenanceEnabledOk returns a tuple with the MaintenanceEnabled field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupTarget) GetWeightOk() (*int32, bool) { +func (o *TargetGroupTarget) GetMaintenanceEnabledOk() (*bool, bool) { if o == nil { return nil, false } - return o.Weight, true + return o.MaintenanceEnabled, true } -// SetWeight sets field value -func (o *TargetGroupTarget) SetWeight(v int32) { +// SetMaintenanceEnabled sets field value +func (o *TargetGroupTarget) SetMaintenanceEnabled(v bool) { - o.Weight = &v + o.MaintenanceEnabled = &v } -// HasWeight returns a boolean if a field has been set. -func (o *TargetGroupTarget) HasWeight() bool { - if o != nil && o.Weight != nil { +// HasMaintenanceEnabled returns a boolean if a field has been set. +func (o *TargetGroupTarget) HasMaintenanceEnabled() bool { + if o != nil && o.MaintenanceEnabled != nil { return true } return false } -// GetHealthCheckEnabled returns the HealthCheckEnabled field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *TargetGroupTarget) GetHealthCheckEnabled() *bool { +// GetPort returns the Port field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupTarget) GetPort() *int32 { if o == nil { return nil } - return o.HealthCheckEnabled + return o.Port } -// GetHealthCheckEnabledOk returns a tuple with the HealthCheckEnabled field value +// GetPortOk returns a tuple with the Port field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupTarget) GetHealthCheckEnabledOk() (*bool, bool) { +func (o *TargetGroupTarget) GetPortOk() (*int32, bool) { if o == nil { return nil, false } - return o.HealthCheckEnabled, true + return o.Port, true } -// SetHealthCheckEnabled sets field value -func (o *TargetGroupTarget) SetHealthCheckEnabled(v bool) { +// SetPort sets field value +func (o *TargetGroupTarget) SetPort(v int32) { - o.HealthCheckEnabled = &v + o.Port = &v } -// HasHealthCheckEnabled returns a boolean if a field has been set. 
-func (o *TargetGroupTarget) HasHealthCheckEnabled() bool { - if o != nil && o.HealthCheckEnabled != nil { +// HasPort returns a boolean if a field has been set. +func (o *TargetGroupTarget) HasPort() bool { + if o != nil && o.Port != nil { return true } return false } -// GetMaintenanceEnabled returns the MaintenanceEnabled field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *TargetGroupTarget) GetMaintenanceEnabled() *bool { +// GetWeight returns the Weight field value +// If the value is explicit nil, nil is returned +func (o *TargetGroupTarget) GetWeight() *int32 { if o == nil { return nil } - return o.MaintenanceEnabled + return o.Weight } -// GetMaintenanceEnabledOk returns a tuple with the MaintenanceEnabled field value +// GetWeightOk returns a tuple with the Weight field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroupTarget) GetMaintenanceEnabledOk() (*bool, bool) { +func (o *TargetGroupTarget) GetWeightOk() (*int32, bool) { if o == nil { return nil, false } - return o.MaintenanceEnabled, true + return o.Weight, true } -// SetMaintenanceEnabled sets field value -func (o *TargetGroupTarget) SetMaintenanceEnabled(v bool) { +// SetWeight sets field value +func (o *TargetGroupTarget) SetWeight(v int32) { - o.MaintenanceEnabled = &v + o.Weight = &v } -// HasMaintenanceEnabled returns a boolean if a field has been set. -func (o *TargetGroupTarget) HasMaintenanceEnabled() bool { - if o != nil && o.MaintenanceEnabled != nil { +// HasWeight returns a boolean if a field has been set. +func (o *TargetGroupTarget) HasWeight() bool { + if o != nil && o.Weight != nil { return true } @@ -242,21 +242,26 @@ func (o *TargetGroupTarget) HasMaintenanceEnabled() bool { func (o TargetGroupTarget) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} + if o.HealthCheckEnabled != nil { + toSerialize["healthCheckEnabled"] = o.HealthCheckEnabled + } + if o.Ip != nil { toSerialize["ip"] = o.Ip } + + if o.MaintenanceEnabled != nil { + toSerialize["maintenanceEnabled"] = o.MaintenanceEnabled + } + if o.Port != nil { toSerialize["port"] = o.Port } + if o.Weight != nil { toSerialize["weight"] = o.Weight } - if o.HealthCheckEnabled != nil { - toSerialize["healthCheckEnabled"] = o.HealthCheckEnabled - } - if o.MaintenanceEnabled != nil { - toSerialize["maintenanceEnabled"] = o.MaintenanceEnabled - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_groups.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_groups.go index ca5778f54..521ca7b3f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_groups.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_groups.go @@ -16,19 +16,19 @@ import ( // TargetGroups struct for TargetGroups type TargetGroups struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]TargetGroup `json:"items,omitempty"` + // The limit, specified in the request (if not specified, the endpoint's default pagination limit is used). 
+ Limit *float32 `json:"limit,omitempty"` // The offset, specified in the request (if not is specified, 0 is used by default). Offset *float32 `json:"offset,omitempty"` - // The limit, specified in the request (if not specified, the endpoint's default pagination limit is used). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewTargetGroups instantiates a new TargetGroups object @@ -49,114 +49,114 @@ func NewTargetGroupsWithDefaults() *TargetGroups { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroups) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *TargetGroups) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroups) GetIdOk() (*string, bool) { +func (o *TargetGroups) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *TargetGroups) SetId(v string) { +// SetLinks sets field value +func (o *TargetGroups) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *TargetGroups) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *TargetGroups) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *TargetGroups) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *TargetGroups) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroups) GetTypeOk() (*Type, bool) { +func (o *TargetGroups) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *TargetGroups) SetType(v Type) { +// SetHref sets field value +func (o *TargetGroups) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *TargetGroups) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *TargetGroups) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TargetGroups) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *TargetGroups) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroups) GetHrefOk() (*string, bool) { +func (o *TargetGroups) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *TargetGroups) SetHref(v string) { +// SetId sets field value +func (o *TargetGroups) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *TargetGroups) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *TargetGroups) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *TargetGroups) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []TargetGroup will be returned +// If the value is explicit nil, nil is returned func (o *TargetGroups) GetItems() *[]TargetGroup { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *TargetGroups) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *TargetGroups) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *TargetGroups) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroups) GetOffsetOk() (*float32, bool) { +func (o *TargetGroups) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *TargetGroups) SetOffset(v float32) { +// SetLimit sets field value +func (o *TargetGroups) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *TargetGroups) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *TargetGroups) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *TargetGroups) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *TargetGroups) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroups) GetLimitOk() (*float32, bool) { +func (o *TargetGroups) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *TargetGroups) SetLimit(v float32) { +// SetOffset sets field value +func (o *TargetGroups) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *TargetGroups) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *TargetGroups) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *TargetGroups) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *TargetGroups) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetGroups) GetLinksOk() (*PaginationLinks, bool) { +func (o *TargetGroups) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *TargetGroups) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *TargetGroups) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *TargetGroups) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *TargetGroups) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *TargetGroups) HasLinks() bool { func (o TargetGroups) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_port_range.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_port_range.go index 9a1c39ea5..8c5281ed8 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_port_range.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_port_range.go @@ -16,10 +16,10 @@ import ( // TargetPortRange struct for TargetPortRange type TargetPortRange struct { - // Target port range start associated with the NAT Gateway rule. - Start *int32 `json:"start,omitempty"` // Target port range end associated with the NAT Gateway rule. End *int32 `json:"end,omitempty"` + // Target port range start associated with the NAT Gateway rule. 
+ Start *int32 `json:"start,omitempty"` } // NewTargetPortRange instantiates a new TargetPortRange object @@ -40,76 +40,76 @@ func NewTargetPortRangeWithDefaults() *TargetPortRange { return &this } -// GetStart returns the Start field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *TargetPortRange) GetStart() *int32 { +// GetEnd returns the End field value +// If the value is explicit nil, nil is returned +func (o *TargetPortRange) GetEnd() *int32 { if o == nil { return nil } - return o.Start + return o.End } -// GetStartOk returns a tuple with the Start field value +// GetEndOk returns a tuple with the End field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetPortRange) GetStartOk() (*int32, bool) { +func (o *TargetPortRange) GetEndOk() (*int32, bool) { if o == nil { return nil, false } - return o.Start, true + return o.End, true } -// SetStart sets field value -func (o *TargetPortRange) SetStart(v int32) { +// SetEnd sets field value +func (o *TargetPortRange) SetEnd(v int32) { - o.Start = &v + o.End = &v } -// HasStart returns a boolean if a field has been set. -func (o *TargetPortRange) HasStart() bool { - if o != nil && o.Start != nil { +// HasEnd returns a boolean if a field has been set. +func (o *TargetPortRange) HasEnd() bool { + if o != nil && o.End != nil { return true } return false } -// GetEnd returns the End field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *TargetPortRange) GetEnd() *int32 { +// GetStart returns the Start field value +// If the value is explicit nil, nil is returned +func (o *TargetPortRange) GetStart() *int32 { if o == nil { return nil } - return o.End + return o.Start } -// GetEndOk returns a tuple with the End field value +// GetStartOk returns a tuple with the Start field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TargetPortRange) GetEndOk() (*int32, bool) { +func (o *TargetPortRange) GetStartOk() (*int32, bool) { if o == nil { return nil, false } - return o.End, true + return o.Start, true } -// SetEnd sets field value -func (o *TargetPortRange) SetEnd(v int32) { +// SetStart sets field value +func (o *TargetPortRange) SetStart(v int32) { - o.End = &v + o.Start = &v } -// HasEnd returns a boolean if a field has been set. -func (o *TargetPortRange) HasEnd() bool { - if o != nil && o.End != nil { +// HasStart returns a boolean if a field has been set. +func (o *TargetPortRange) HasStart() bool { + if o != nil && o.Start != nil { return true } @@ -118,12 +118,14 @@ func (o *TargetPortRange) HasEnd() bool { func (o TargetPortRange) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Start != nil { - toSerialize["start"] = o.Start - } if o.End != nil { toSerialize["end"] = o.End } + + if o.Start != nil { + toSerialize["start"] = o.Start + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_template.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_template.go index 48156f106..ad290d409 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_template.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_template.go @@ -16,14 +16,14 @@ import ( // Template struct for Template type Template struct { - // The resource's unique identifier. 
- Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *TemplateProperties `json:"properties"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewTemplate instantiates a new Template object @@ -46,190 +46,190 @@ func NewTemplateWithDefaults() *Template { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Template) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Template) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Template) GetIdOk() (*string, bool) { +func (o *Template) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Template) SetId(v string) { +// SetHref sets field value +func (o *Template) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Template) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Template) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Template) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Template) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Template) GetTypeOk() (*Type, bool) { +func (o *Template) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Template) SetType(v Type) { +// SetId sets field value +func (o *Template) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Template) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. 
+func (o *Template) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Template) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *Template) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Template) GetHrefOk() (*string, bool) { +func (o *Template) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *Template) SetHref(v string) { +// SetMetadata sets field value +func (o *Template) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *Template) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *Template) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *Template) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *Template) GetProperties() *TemplateProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Template) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *Template) GetPropertiesOk() (*TemplateProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *Template) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *Template) SetProperties(v TemplateProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *Template) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *Template) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for TemplateProperties will be returned -func (o *Template) GetProperties() *TemplateProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Template) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Template) GetPropertiesOk() (*TemplateProperties, bool) { +func (o *Template) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *Template) SetProperties(v TemplateProperties) { +// SetType sets field value +func (o *Template) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *Template) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. +func (o *Template) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *Template) HasProperties() bool { func (o Template) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_template_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_template_properties.go index 5d661eee2..98d115011 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_template_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_template_properties.go @@ -16,10 +16,10 @@ import ( // TemplateProperties struct for TemplateProperties type TemplateProperties struct { - // The resource name. - Name *string `json:"name"` // The CPU cores count. Cores *float32 `json:"cores"` + // The resource name. + Name *string `json:"name"` // The RAM size in MB. Ram *float32 `json:"ram"` // The storage size in GB. @@ -30,11 +30,11 @@ type TemplateProperties struct { // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewTemplateProperties(name string, cores float32, ram float32, storageSize float32) *TemplateProperties { +func NewTemplateProperties(cores float32, name string, ram float32, storageSize float32) *TemplateProperties { this := TemplateProperties{} - this.Name = &name this.Cores = &cores + this.Name = &name this.Ram = &ram this.StorageSize = &storageSize @@ -49,76 +49,76 @@ func NewTemplatePropertiesWithDefaults() *TemplateProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *TemplateProperties) GetName() *string { +// GetCores returns the Cores field value +// If the value is explicit nil, nil is returned +func (o *TemplateProperties) GetCores() *float32 { if o == nil { return nil } - return o.Name + return o.Cores } -// GetNameOk returns a tuple with the Name field value +// GetCoresOk returns a tuple with the Cores field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TemplateProperties) GetNameOk() (*string, bool) { +func (o *TemplateProperties) GetCoresOk() (*float32, bool) { if o == nil { return nil, false } - return o.Name, true + return o.Cores, true } -// SetName sets field value -func (o *TemplateProperties) SetName(v string) { +// SetCores sets field value +func (o *TemplateProperties) SetCores(v float32) { - o.Name = &v + o.Cores = &v } -// HasName returns a boolean if a field has been set. -func (o *TemplateProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasCores returns a boolean if a field has been set. +func (o *TemplateProperties) HasCores() bool { + if o != nil && o.Cores != nil { return true } return false } -// GetCores returns the Cores field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *TemplateProperties) GetCores() *float32 { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *TemplateProperties) GetName() *string { if o == nil { return nil } - return o.Cores + return o.Name } -// GetCoresOk returns a tuple with the Cores field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *TemplateProperties) GetCoresOk() (*float32, bool) { +func (o *TemplateProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.Cores, true + return o.Name, true } -// SetCores sets field value -func (o *TemplateProperties) SetCores(v float32) { +// SetName sets field value +func (o *TemplateProperties) SetName(v string) { - o.Cores = &v + o.Name = &v } -// HasCores returns a boolean if a field has been set. -func (o *TemplateProperties) HasCores() bool { - if o != nil && o.Cores != nil { +// HasName returns a boolean if a field has been set. 
+func (o *TemplateProperties) HasName() bool { + if o != nil && o.Name != nil { return true } @@ -126,7 +126,7 @@ func (o *TemplateProperties) HasCores() bool { } // GetRam returns the Ram field value -// If the value is explicit nil, the zero value for float32 will be returned +// If the value is explicit nil, nil is returned func (o *TemplateProperties) GetRam() *float32 { if o == nil { return nil @@ -164,7 +164,7 @@ func (o *TemplateProperties) HasRam() bool { } // GetStorageSize returns the StorageSize field value -// If the value is explicit nil, the zero value for float32 will be returned +// If the value is explicit nil, nil is returned func (o *TemplateProperties) GetStorageSize() *float32 { if o == nil { return nil @@ -203,18 +203,22 @@ func (o *TemplateProperties) HasStorageSize() bool { func (o TemplateProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } if o.Cores != nil { toSerialize["cores"] = o.Cores } + + if o.Name != nil { + toSerialize["name"] = o.Name + } + if o.Ram != nil { toSerialize["ram"] = o.Ram } + if o.StorageSize != nil { toSerialize["storageSize"] = o.StorageSize } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_templates.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_templates.go index 1d6e9640c..c66eac10c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_templates.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_templates.go @@ -16,14 +16,14 @@ import ( // Templates struct for Templates type Templates struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Template `json:"items,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewTemplates instantiates a new Templates object @@ -44,152 +44,152 @@ func NewTemplatesWithDefaults() *Templates { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Templates) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Templates) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Templates) GetIdOk() (*string, bool) { +func (o *Templates) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Templates) SetId(v string) { +// SetHref sets field value +func (o *Templates) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Templates) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *Templates) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Templates) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Templates) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Templates) GetTypeOk() (*Type, bool) { +func (o *Templates) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Templates) SetType(v Type) { +// SetId sets field value +func (o *Templates) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Templates) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Templates) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Templates) GetHref() *string { +// GetItems returns the Items field value +// If the value is explicit nil, nil is returned +func (o *Templates) GetItems() *[]Template { if o == nil { return nil } - return o.Href + return o.Items } -// GetHrefOk returns a tuple with the Href field value +// GetItemsOk returns a tuple with the Items field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Templates) GetHrefOk() (*string, bool) { +func (o *Templates) GetItemsOk() (*[]Template, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Items, true } -// SetHref sets field value -func (o *Templates) SetHref(v string) { +// SetItems sets field value +func (o *Templates) SetItems(v []Template) { - o.Href = &v + o.Items = &v } -// HasHref returns a boolean if a field has been set. -func (o *Templates) HasHref() bool { - if o != nil && o.Href != nil { +// HasItems returns a boolean if a field has been set. +func (o *Templates) HasItems() bool { + if o != nil && o.Items != nil { return true } return false } -// GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Template will be returned -func (o *Templates) GetItems() *[]Template { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Templates) GetType() *Type { if o == nil { return nil } - return o.Items + return o.Type } -// GetItemsOk returns a tuple with the Items field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Templates) GetItemsOk() (*[]Template, bool) { +func (o *Templates) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Items, true + return o.Type, true } -// SetItems sets field value -func (o *Templates) SetItems(v []Template) { +// SetType sets field value +func (o *Templates) SetType(v Type) { - o.Items = &v + o.Type = &v } -// HasItems returns a boolean if a field has been set. 
-func (o *Templates) HasItems() bool { - if o != nil && o.Items != nil { +// HasType returns a boolean if a field has been set. +func (o *Templates) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -198,18 +198,22 @@ func (o *Templates) HasItems() bool { func (o Templates) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_token.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_token.go index 074e07ad6..c886b2680 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_token.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_token.go @@ -39,7 +39,7 @@ func NewTokenWithDefaults() *Token { } // GetToken returns the Token field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *Token) GetToken() *string { if o == nil { return nil @@ -81,6 +81,7 @@ func (o Token) MarshalJSON() ([]byte, error) { if o.Token != nil { toSerialize["token"] = o.Token } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user.go index 14864be69..38d1a9b06 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user.go @@ -16,15 +16,15 @@ import ( // User struct for User type User struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Entities *UsersEntities `json:"entities,omitempty"` // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *UserMetadata `json:"metadata,omitempty"` Properties *UserProperties `json:"properties"` - Entities *UsersEntities `json:"entities,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewUser instantiates a new User object @@ -47,114 +47,114 @@ func NewUserWithDefaults() *User { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *User) GetId() *string { +// GetEntities returns the Entities field value +// If the value is explicit nil, nil is returned +func (o *User) GetEntities() *UsersEntities { if o == nil { return nil } - return o.Id + return o.Entities } -// GetIdOk returns a tuple with the Id field value +// GetEntitiesOk returns a tuple with the Entities field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *User) GetIdOk() (*string, bool) { +func (o *User) GetEntitiesOk() (*UsersEntities, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Entities, true } -// SetId sets field value -func (o *User) SetId(v string) { +// SetEntities sets field value +func (o *User) SetEntities(v UsersEntities) { - o.Id = &v + o.Entities = &v } -// HasId returns a boolean if a field has been set. -func (o *User) HasId() bool { - if o != nil && o.Id != nil { +// HasEntities returns a boolean if a field has been set. +func (o *User) HasEntities() bool { + if o != nil && o.Entities != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *User) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *User) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *User) GetTypeOk() (*Type, bool) { +func (o *User) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *User) SetType(v Type) { +// SetHref sets field value +func (o *User) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *User) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *User) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *User) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *User) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *User) GetHrefOk() (*string, bool) { +func (o *User) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *User) SetHref(v string) { +// SetId sets field value +func (o *User) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *User) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. 
+func (o *User) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -162,7 +162,7 @@ func (o *User) HasHref() bool { } // GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for UserMetadata will be returned +// If the value is explicit nil, nil is returned func (o *User) GetMetadata() *UserMetadata { if o == nil { return nil @@ -200,7 +200,7 @@ func (o *User) HasMetadata() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for UserProperties will be returned +// If the value is explicit nil, nil is returned func (o *User) GetProperties() *UserProperties { if o == nil { return nil @@ -237,38 +237,38 @@ func (o *User) HasProperties() bool { return false } -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for UsersEntities will be returned -func (o *User) GetEntities() *UsersEntities { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *User) GetType() *Type { if o == nil { return nil } - return o.Entities + return o.Type } -// GetEntitiesOk returns a tuple with the Entities field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *User) GetEntitiesOk() (*UsersEntities, bool) { +func (o *User) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Entities, true + return o.Type, true } -// SetEntities sets field value -func (o *User) SetEntities(v UsersEntities) { +// SetType sets field value +func (o *User) SetType(v Type) { - o.Entities = &v + o.Type = &v } -// HasEntities returns a boolean if a field has been set. -func (o *User) HasEntities() bool { - if o != nil && o.Entities != nil { +// HasType returns a boolean if a field has been set. +func (o *User) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -277,24 +277,30 @@ func (o *User) HasEntities() bool { func (o User) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Entities != nil { + toSerialize["entities"] = o.Entities } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } - if o.Entities != nil { - toSerialize["entities"] = o.Entities + + if o.Type != nil { + toSerialize["type"] = o.Type } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_metadata.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_metadata.go index da90dc00b..de6738d71 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_metadata.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_metadata.go @@ -17,10 +17,10 @@ import ( // UserMetadata struct for UserMetadata type UserMetadata struct { - // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. - Etag *string `json:"etag,omitempty"` // The time the user was created. 
CreatedDate *IonosTime + // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. + Etag *string `json:"etag,omitempty"` // The time of the last login by the user. LastLogin *IonosTime } @@ -43,83 +43,83 @@ func NewUserMetadataWithDefaults() *UserMetadata { return &this } -// GetEtag returns the Etag field value -// If the value is explicit nil, the zero value for string will be returned -func (o *UserMetadata) GetEtag() *string { +// GetCreatedDate returns the CreatedDate field value +// If the value is explicit nil, nil is returned +func (o *UserMetadata) GetCreatedDate() *time.Time { if o == nil { return nil } - return o.Etag + if o.CreatedDate == nil { + return nil + } + return &o.CreatedDate.Time } -// GetEtagOk returns a tuple with the Etag field value +// GetCreatedDateOk returns a tuple with the CreatedDate field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserMetadata) GetEtagOk() (*string, bool) { +func (o *UserMetadata) GetCreatedDateOk() (*time.Time, bool) { if o == nil { return nil, false } - return o.Etag, true + if o.CreatedDate == nil { + return nil, false + } + return &o.CreatedDate.Time, true + } -// SetEtag sets field value -func (o *UserMetadata) SetEtag(v string) { +// SetCreatedDate sets field value +func (o *UserMetadata) SetCreatedDate(v time.Time) { - o.Etag = &v + o.CreatedDate = &IonosTime{v} } -// HasEtag returns a boolean if a field has been set. -func (o *UserMetadata) HasEtag() bool { - if o != nil && o.Etag != nil { +// HasCreatedDate returns a boolean if a field has been set. +func (o *UserMetadata) HasCreatedDate() bool { + if o != nil && o.CreatedDate != nil { return true } return false } -// GetCreatedDate returns the CreatedDate field value -// If the value is explicit nil, the zero value for time.Time will be returned -func (o *UserMetadata) GetCreatedDate() *time.Time { +// GetEtag returns the Etag field value +// If the value is explicit nil, nil is returned +func (o *UserMetadata) GetEtag() *string { if o == nil { return nil } - if o.CreatedDate == nil { - return nil - } - return &o.CreatedDate.Time + return o.Etag } -// GetCreatedDateOk returns a tuple with the CreatedDate field value +// GetEtagOk returns a tuple with the Etag field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserMetadata) GetCreatedDateOk() (*time.Time, bool) { +func (o *UserMetadata) GetEtagOk() (*string, bool) { if o == nil { return nil, false } - if o.CreatedDate == nil { - return nil, false - } - return &o.CreatedDate.Time, true - + return o.Etag, true } -// SetCreatedDate sets field value -func (o *UserMetadata) SetCreatedDate(v time.Time) { +// SetEtag sets field value +func (o *UserMetadata) SetEtag(v string) { - o.CreatedDate = &IonosTime{v} + o.Etag = &v } -// HasCreatedDate returns a boolean if a field has been set. -func (o *UserMetadata) HasCreatedDate() bool { - if o != nil && o.CreatedDate != nil { +// HasEtag returns a boolean if a field has been set. 
+func (o *UserMetadata) HasEtag() bool { + if o != nil && o.Etag != nil { return true } @@ -127,7 +127,7 @@ func (o *UserMetadata) HasCreatedDate() bool { } // GetLastLogin returns the LastLogin field value -// If the value is explicit nil, the zero value for time.Time will be returned +// If the value is explicit nil, nil is returned func (o *UserMetadata) GetLastLogin() *time.Time { if o == nil { return nil @@ -173,15 +173,18 @@ func (o *UserMetadata) HasLastLogin() bool { func (o UserMetadata) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Etag != nil { - toSerialize["etag"] = o.Etag - } if o.CreatedDate != nil { toSerialize["createdDate"] = o.CreatedDate } + + if o.Etag != nil { + toSerialize["etag"] = o.Etag + } + if o.LastLogin != nil { toSerialize["lastLogin"] = o.LastLogin } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_post.go index 912dc3391..cfbe91439 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_post.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_post.go @@ -40,7 +40,7 @@ func NewUserPostWithDefaults() *UserPost { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for UserPropertiesPost will be returned +// If the value is explicit nil, nil is returned func (o *UserPost) GetProperties() *UserPropertiesPost { if o == nil { return nil @@ -82,6 +82,7 @@ func (o UserPost) MarshalJSON() ([]byte, error) { if o.Properties != nil { toSerialize["properties"] = o.Properties } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties.go index 6ff0ff0f0..7fadd2b43 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties.go @@ -16,22 +16,22 @@ import ( // UserProperties struct for UserProperties type UserProperties struct { - // The first name of the user. - Firstname *string `json:"firstname,omitempty"` - // The last name of the user. - Lastname *string `json:"lastname,omitempty"` - // The email address of the user. - Email *string `json:"email,omitempty"` + // Indicates if the user is active. + Active *bool `json:"active,omitempty"` // Indicates if the user has admin rights. Administrator *bool `json:"administrator,omitempty"` + // The email address of the user. + Email *string `json:"email,omitempty"` + // The first name of the user. + Firstname *string `json:"firstname,omitempty"` // Indicates if secure authentication should be forced on the user. ForceSecAuth *bool `json:"forceSecAuth,omitempty"` - // Indicates if secure authentication is active for the user. - SecAuthActive *bool `json:"secAuthActive,omitempty"` + // The last name of the user. + Lastname *string `json:"lastname,omitempty"` // Canonical (S3) ID of the user for a given identity. S3CanonicalUserId *string `json:"s3CanonicalUserId,omitempty"` - // Indicates if the user is active. - Active *bool `json:"active,omitempty"` + // Indicates if secure authentication is active for the user. 
+ SecAuthActive *bool `json:"secAuthActive,omitempty"` } // NewUserProperties instantiates a new UserProperties object @@ -52,76 +52,76 @@ func NewUserPropertiesWithDefaults() *UserProperties { return &this } -// GetFirstname returns the Firstname field value -// If the value is explicit nil, the zero value for string will be returned -func (o *UserProperties) GetFirstname() *string { +// GetActive returns the Active field value +// If the value is explicit nil, nil is returned +func (o *UserProperties) GetActive() *bool { if o == nil { return nil } - return o.Firstname + return o.Active } -// GetFirstnameOk returns a tuple with the Firstname field value +// GetActiveOk returns a tuple with the Active field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserProperties) GetFirstnameOk() (*string, bool) { +func (o *UserProperties) GetActiveOk() (*bool, bool) { if o == nil { return nil, false } - return o.Firstname, true + return o.Active, true } -// SetFirstname sets field value -func (o *UserProperties) SetFirstname(v string) { +// SetActive sets field value +func (o *UserProperties) SetActive(v bool) { - o.Firstname = &v + o.Active = &v } -// HasFirstname returns a boolean if a field has been set. -func (o *UserProperties) HasFirstname() bool { - if o != nil && o.Firstname != nil { +// HasActive returns a boolean if a field has been set. +func (o *UserProperties) HasActive() bool { + if o != nil && o.Active != nil { return true } return false } -// GetLastname returns the Lastname field value -// If the value is explicit nil, the zero value for string will be returned -func (o *UserProperties) GetLastname() *string { +// GetAdministrator returns the Administrator field value +// If the value is explicit nil, nil is returned +func (o *UserProperties) GetAdministrator() *bool { if o == nil { return nil } - return o.Lastname + return o.Administrator } -// GetLastnameOk returns a tuple with the Lastname field value +// GetAdministratorOk returns a tuple with the Administrator field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserProperties) GetLastnameOk() (*string, bool) { +func (o *UserProperties) GetAdministratorOk() (*bool, bool) { if o == nil { return nil, false } - return o.Lastname, true + return o.Administrator, true } -// SetLastname sets field value -func (o *UserProperties) SetLastname(v string) { +// SetAdministrator sets field value +func (o *UserProperties) SetAdministrator(v bool) { - o.Lastname = &v + o.Administrator = &v } -// HasLastname returns a boolean if a field has been set. -func (o *UserProperties) HasLastname() bool { - if o != nil && o.Lastname != nil { +// HasAdministrator returns a boolean if a field has been set. 
+func (o *UserProperties) HasAdministrator() bool { + if o != nil && o.Administrator != nil { return true } @@ -129,7 +129,7 @@ func (o *UserProperties) HasLastname() bool { } // GetEmail returns the Email field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *UserProperties) GetEmail() *string { if o == nil { return nil @@ -166,38 +166,38 @@ func (o *UserProperties) HasEmail() bool { return false } -// GetAdministrator returns the Administrator field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserProperties) GetAdministrator() *bool { +// GetFirstname returns the Firstname field value +// If the value is explicit nil, nil is returned +func (o *UserProperties) GetFirstname() *string { if o == nil { return nil } - return o.Administrator + return o.Firstname } -// GetAdministratorOk returns a tuple with the Administrator field value +// GetFirstnameOk returns a tuple with the Firstname field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserProperties) GetAdministratorOk() (*bool, bool) { +func (o *UserProperties) GetFirstnameOk() (*string, bool) { if o == nil { return nil, false } - return o.Administrator, true + return o.Firstname, true } -// SetAdministrator sets field value -func (o *UserProperties) SetAdministrator(v bool) { +// SetFirstname sets field value +func (o *UserProperties) SetFirstname(v string) { - o.Administrator = &v + o.Firstname = &v } -// HasAdministrator returns a boolean if a field has been set. -func (o *UserProperties) HasAdministrator() bool { - if o != nil && o.Administrator != nil { +// HasFirstname returns a boolean if a field has been set. +func (o *UserProperties) HasFirstname() bool { + if o != nil && o.Firstname != nil { return true } @@ -205,7 +205,7 @@ func (o *UserProperties) HasAdministrator() bool { } // GetForceSecAuth returns the ForceSecAuth field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *UserProperties) GetForceSecAuth() *bool { if o == nil { return nil @@ -242,38 +242,38 @@ func (o *UserProperties) HasForceSecAuth() bool { return false } -// GetSecAuthActive returns the SecAuthActive field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserProperties) GetSecAuthActive() *bool { +// GetLastname returns the Lastname field value +// If the value is explicit nil, nil is returned +func (o *UserProperties) GetLastname() *string { if o == nil { return nil } - return o.SecAuthActive + return o.Lastname } -// GetSecAuthActiveOk returns a tuple with the SecAuthActive field value +// GetLastnameOk returns a tuple with the Lastname field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserProperties) GetSecAuthActiveOk() (*bool, bool) { +func (o *UserProperties) GetLastnameOk() (*string, bool) { if o == nil { return nil, false } - return o.SecAuthActive, true + return o.Lastname, true } -// SetSecAuthActive sets field value -func (o *UserProperties) SetSecAuthActive(v bool) { +// SetLastname sets field value +func (o *UserProperties) SetLastname(v string) { - o.SecAuthActive = &v + o.Lastname = &v } -// HasSecAuthActive returns a boolean if a field has been set. 
-func (o *UserProperties) HasSecAuthActive() bool { - if o != nil && o.SecAuthActive != nil { +// HasLastname returns a boolean if a field has been set. +func (o *UserProperties) HasLastname() bool { + if o != nil && o.Lastname != nil { return true } @@ -281,7 +281,7 @@ func (o *UserProperties) HasSecAuthActive() bool { } // GetS3CanonicalUserId returns the S3CanonicalUserId field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *UserProperties) GetS3CanonicalUserId() *string { if o == nil { return nil @@ -318,38 +318,38 @@ func (o *UserProperties) HasS3CanonicalUserId() bool { return false } -// GetActive returns the Active field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserProperties) GetActive() *bool { +// GetSecAuthActive returns the SecAuthActive field value +// If the value is explicit nil, nil is returned +func (o *UserProperties) GetSecAuthActive() *bool { if o == nil { return nil } - return o.Active + return o.SecAuthActive } -// GetActiveOk returns a tuple with the Active field value +// GetSecAuthActiveOk returns a tuple with the SecAuthActive field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserProperties) GetActiveOk() (*bool, bool) { +func (o *UserProperties) GetSecAuthActiveOk() (*bool, bool) { if o == nil { return nil, false } - return o.Active, true + return o.SecAuthActive, true } -// SetActive sets field value -func (o *UserProperties) SetActive(v bool) { +// SetSecAuthActive sets field value +func (o *UserProperties) SetSecAuthActive(v bool) { - o.Active = &v + o.SecAuthActive = &v } -// HasActive returns a boolean if a field has been set. -func (o *UserProperties) HasActive() bool { - if o != nil && o.Active != nil { +// HasSecAuthActive returns a boolean if a field has been set. 
+func (o *UserProperties) HasSecAuthActive() bool { + if o != nil && o.SecAuthActive != nil { return true } @@ -358,30 +358,38 @@ func (o *UserProperties) HasActive() bool { func (o UserProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Firstname != nil { - toSerialize["firstname"] = o.Firstname + if o.Active != nil { + toSerialize["active"] = o.Active } - if o.Lastname != nil { - toSerialize["lastname"] = o.Lastname + + if o.Administrator != nil { + toSerialize["administrator"] = o.Administrator } + if o.Email != nil { toSerialize["email"] = o.Email } - if o.Administrator != nil { - toSerialize["administrator"] = o.Administrator + + if o.Firstname != nil { + toSerialize["firstname"] = o.Firstname } + if o.ForceSecAuth != nil { toSerialize["forceSecAuth"] = o.ForceSecAuth } - if o.SecAuthActive != nil { - toSerialize["secAuthActive"] = o.SecAuthActive + + if o.Lastname != nil { + toSerialize["lastname"] = o.Lastname } + if o.S3CanonicalUserId != nil { toSerialize["s3CanonicalUserId"] = o.S3CanonicalUserId } - if o.Active != nil { - toSerialize["active"] = o.Active + + if o.SecAuthActive != nil { + toSerialize["secAuthActive"] = o.SecAuthActive } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties_post.go index 463eb6dc3..fb4e9026f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties_post.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties_post.go @@ -16,22 +16,22 @@ import ( // UserPropertiesPost struct for UserPropertiesPost type UserPropertiesPost struct { - // The first name of the user. - Firstname *string `json:"firstname,omitempty"` - // The last name of the user. - Lastname *string `json:"lastname,omitempty"` - // The email address of the user. - Email *string `json:"email,omitempty"` + // Indicates if the user is active. + Active *bool `json:"active,omitempty"` // Indicates if the user has admin rights. Administrator *bool `json:"administrator,omitempty"` + // The email address of the user. + Email *string `json:"email,omitempty"` + // The first name of the user. + Firstname *string `json:"firstname,omitempty"` // Indicates if secure authentication should be forced on the user. ForceSecAuth *bool `json:"forceSecAuth,omitempty"` - // Indicates if secure authentication is active for the user. - SecAuthActive *bool `json:"secAuthActive,omitempty"` + // The last name of the user. + Lastname *string `json:"lastname,omitempty"` // User password. Password *string `json:"password,omitempty"` - // Indicates if the user is active. - Active *bool `json:"active,omitempty"` + // Indicates if secure authentication is active for the user. 
+ SecAuthActive *bool `json:"secAuthActive,omitempty"` } // NewUserPropertiesPost instantiates a new UserPropertiesPost object @@ -52,76 +52,76 @@ func NewUserPropertiesPostWithDefaults() *UserPropertiesPost { return &this } -// GetFirstname returns the Firstname field value -// If the value is explicit nil, the zero value for string will be returned -func (o *UserPropertiesPost) GetFirstname() *string { +// GetActive returns the Active field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPost) GetActive() *bool { if o == nil { return nil } - return o.Firstname + return o.Active } -// GetFirstnameOk returns a tuple with the Firstname field value +// GetActiveOk returns a tuple with the Active field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPost) GetFirstnameOk() (*string, bool) { +func (o *UserPropertiesPost) GetActiveOk() (*bool, bool) { if o == nil { return nil, false } - return o.Firstname, true + return o.Active, true } -// SetFirstname sets field value -func (o *UserPropertiesPost) SetFirstname(v string) { +// SetActive sets field value +func (o *UserPropertiesPost) SetActive(v bool) { - o.Firstname = &v + o.Active = &v } -// HasFirstname returns a boolean if a field has been set. -func (o *UserPropertiesPost) HasFirstname() bool { - if o != nil && o.Firstname != nil { +// HasActive returns a boolean if a field has been set. +func (o *UserPropertiesPost) HasActive() bool { + if o != nil && o.Active != nil { return true } return false } -// GetLastname returns the Lastname field value -// If the value is explicit nil, the zero value for string will be returned -func (o *UserPropertiesPost) GetLastname() *string { +// GetAdministrator returns the Administrator field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPost) GetAdministrator() *bool { if o == nil { return nil } - return o.Lastname + return o.Administrator } -// GetLastnameOk returns a tuple with the Lastname field value +// GetAdministratorOk returns a tuple with the Administrator field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPost) GetLastnameOk() (*string, bool) { +func (o *UserPropertiesPost) GetAdministratorOk() (*bool, bool) { if o == nil { return nil, false } - return o.Lastname, true + return o.Administrator, true } -// SetLastname sets field value -func (o *UserPropertiesPost) SetLastname(v string) { +// SetAdministrator sets field value +func (o *UserPropertiesPost) SetAdministrator(v bool) { - o.Lastname = &v + o.Administrator = &v } -// HasLastname returns a boolean if a field has been set. -func (o *UserPropertiesPost) HasLastname() bool { - if o != nil && o.Lastname != nil { +// HasAdministrator returns a boolean if a field has been set. 
+func (o *UserPropertiesPost) HasAdministrator() bool { + if o != nil && o.Administrator != nil { return true } @@ -129,7 +129,7 @@ func (o *UserPropertiesPost) HasLastname() bool { } // GetEmail returns the Email field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *UserPropertiesPost) GetEmail() *string { if o == nil { return nil @@ -166,38 +166,38 @@ func (o *UserPropertiesPost) HasEmail() bool { return false } -// GetAdministrator returns the Administrator field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserPropertiesPost) GetAdministrator() *bool { +// GetFirstname returns the Firstname field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPost) GetFirstname() *string { if o == nil { return nil } - return o.Administrator + return o.Firstname } -// GetAdministratorOk returns a tuple with the Administrator field value +// GetFirstnameOk returns a tuple with the Firstname field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPost) GetAdministratorOk() (*bool, bool) { +func (o *UserPropertiesPost) GetFirstnameOk() (*string, bool) { if o == nil { return nil, false } - return o.Administrator, true + return o.Firstname, true } -// SetAdministrator sets field value -func (o *UserPropertiesPost) SetAdministrator(v bool) { +// SetFirstname sets field value +func (o *UserPropertiesPost) SetFirstname(v string) { - o.Administrator = &v + o.Firstname = &v } -// HasAdministrator returns a boolean if a field has been set. -func (o *UserPropertiesPost) HasAdministrator() bool { - if o != nil && o.Administrator != nil { +// HasFirstname returns a boolean if a field has been set. +func (o *UserPropertiesPost) HasFirstname() bool { + if o != nil && o.Firstname != nil { return true } @@ -205,7 +205,7 @@ func (o *UserPropertiesPost) HasAdministrator() bool { } // GetForceSecAuth returns the ForceSecAuth field value -// If the value is explicit nil, the zero value for bool will be returned +// If the value is explicit nil, nil is returned func (o *UserPropertiesPost) GetForceSecAuth() *bool { if o == nil { return nil @@ -242,38 +242,38 @@ func (o *UserPropertiesPost) HasForceSecAuth() bool { return false } -// GetSecAuthActive returns the SecAuthActive field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserPropertiesPost) GetSecAuthActive() *bool { +// GetLastname returns the Lastname field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPost) GetLastname() *string { if o == nil { return nil } - return o.SecAuthActive + return o.Lastname } -// GetSecAuthActiveOk returns a tuple with the SecAuthActive field value +// GetLastnameOk returns a tuple with the Lastname field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPost) GetSecAuthActiveOk() (*bool, bool) { +func (o *UserPropertiesPost) GetLastnameOk() (*string, bool) { if o == nil { return nil, false } - return o.SecAuthActive, true + return o.Lastname, true } -// SetSecAuthActive sets field value -func (o *UserPropertiesPost) SetSecAuthActive(v bool) { +// SetLastname sets field value +func (o *UserPropertiesPost) SetLastname(v string) { - o.SecAuthActive = &v + o.Lastname = &v } -// HasSecAuthActive returns a boolean if a field has been set. -func (o *UserPropertiesPost) HasSecAuthActive() bool { - if o != nil && o.SecAuthActive != nil { +// HasLastname returns a boolean if a field has been set. +func (o *UserPropertiesPost) HasLastname() bool { + if o != nil && o.Lastname != nil { return true } @@ -281,7 +281,7 @@ func (o *UserPropertiesPost) HasSecAuthActive() bool { } // GetPassword returns the Password field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *UserPropertiesPost) GetPassword() *string { if o == nil { return nil @@ -318,38 +318,38 @@ func (o *UserPropertiesPost) HasPassword() bool { return false } -// GetActive returns the Active field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserPropertiesPost) GetActive() *bool { +// GetSecAuthActive returns the SecAuthActive field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPost) GetSecAuthActive() *bool { if o == nil { return nil } - return o.Active + return o.SecAuthActive } -// GetActiveOk returns a tuple with the Active field value +// GetSecAuthActiveOk returns a tuple with the SecAuthActive field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPost) GetActiveOk() (*bool, bool) { +func (o *UserPropertiesPost) GetSecAuthActiveOk() (*bool, bool) { if o == nil { return nil, false } - return o.Active, true + return o.SecAuthActive, true } -// SetActive sets field value -func (o *UserPropertiesPost) SetActive(v bool) { +// SetSecAuthActive sets field value +func (o *UserPropertiesPost) SetSecAuthActive(v bool) { - o.Active = &v + o.SecAuthActive = &v } -// HasActive returns a boolean if a field has been set. -func (o *UserPropertiesPost) HasActive() bool { - if o != nil && o.Active != nil { +// HasSecAuthActive returns a boolean if a field has been set. 
+func (o *UserPropertiesPost) HasSecAuthActive() bool { + if o != nil && o.SecAuthActive != nil { return true } @@ -358,30 +358,38 @@ func (o *UserPropertiesPost) HasActive() bool { func (o UserPropertiesPost) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Firstname != nil { - toSerialize["firstname"] = o.Firstname + if o.Active != nil { + toSerialize["active"] = o.Active } - if o.Lastname != nil { - toSerialize["lastname"] = o.Lastname + + if o.Administrator != nil { + toSerialize["administrator"] = o.Administrator } + if o.Email != nil { toSerialize["email"] = o.Email } - if o.Administrator != nil { - toSerialize["administrator"] = o.Administrator + + if o.Firstname != nil { + toSerialize["firstname"] = o.Firstname } + if o.ForceSecAuth != nil { toSerialize["forceSecAuth"] = o.ForceSecAuth } - if o.SecAuthActive != nil { - toSerialize["secAuthActive"] = o.SecAuthActive + + if o.Lastname != nil { + toSerialize["lastname"] = o.Lastname } + if o.Password != nil { toSerialize["password"] = o.Password } - if o.Active != nil { - toSerialize["active"] = o.Active + + if o.SecAuthActive != nil { + toSerialize["secAuthActive"] = o.SecAuthActive } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties_put.go index 778c3b0cf..d0ad92946 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_properties_put.go @@ -16,22 +16,22 @@ import ( // UserPropertiesPut struct for UserPropertiesPut type UserPropertiesPut struct { + // Indicates if the user is active. + Active *bool `json:"active,omitempty"` + // Indicates if the user has admin rights. + Administrator *bool `json:"administrator,omitempty"` + // The email address of the user. + Email *string `json:"email,omitempty"` // The first name of the user. Firstname *string `json:"firstname,omitempty"` + // Indicates if secure authentication should be forced on the user. + ForceSecAuth *bool `json:"forceSecAuth,omitempty"` // The last name of the user. Lastname *string `json:"lastname,omitempty"` - // The email address of the user. - Email *string `json:"email,omitempty"` // password of the user Password *string `json:"password,omitempty"` - // Indicates if the user has admin rights. - Administrator *bool `json:"administrator,omitempty"` - // Indicates if secure authentication should be forced on the user. - ForceSecAuth *bool `json:"forceSecAuth,omitempty"` // Indicates if secure authentication is active for the user. SecAuthActive *bool `json:"secAuthActive,omitempty"` - // Indicates if the user is active. - Active *bool `json:"active,omitempty"` } // NewUserPropertiesPut instantiates a new UserPropertiesPut object @@ -52,76 +52,76 @@ func NewUserPropertiesPutWithDefaults() *UserPropertiesPut { return &this } -// GetFirstname returns the Firstname field value -// If the value is explicit nil, the zero value for string will be returned -func (o *UserPropertiesPut) GetFirstname() *string { +// GetActive returns the Active field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPut) GetActive() *bool { if o == nil { return nil } - return o.Firstname + return o.Active } -// GetFirstnameOk returns a tuple with the Firstname field value +// GetActiveOk returns a tuple with the Active field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPut) GetFirstnameOk() (*string, bool) { +func (o *UserPropertiesPut) GetActiveOk() (*bool, bool) { if o == nil { return nil, false } - return o.Firstname, true + return o.Active, true } -// SetFirstname sets field value -func (o *UserPropertiesPut) SetFirstname(v string) { +// SetActive sets field value +func (o *UserPropertiesPut) SetActive(v bool) { - o.Firstname = &v + o.Active = &v } -// HasFirstname returns a boolean if a field has been set. -func (o *UserPropertiesPut) HasFirstname() bool { - if o != nil && o.Firstname != nil { +// HasActive returns a boolean if a field has been set. +func (o *UserPropertiesPut) HasActive() bool { + if o != nil && o.Active != nil { return true } return false } -// GetLastname returns the Lastname field value -// If the value is explicit nil, the zero value for string will be returned -func (o *UserPropertiesPut) GetLastname() *string { +// GetAdministrator returns the Administrator field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPut) GetAdministrator() *bool { if o == nil { return nil } - return o.Lastname + return o.Administrator } -// GetLastnameOk returns a tuple with the Lastname field value +// GetAdministratorOk returns a tuple with the Administrator field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPut) GetLastnameOk() (*string, bool) { +func (o *UserPropertiesPut) GetAdministratorOk() (*bool, bool) { if o == nil { return nil, false } - return o.Lastname, true + return o.Administrator, true } -// SetLastname sets field value -func (o *UserPropertiesPut) SetLastname(v string) { +// SetAdministrator sets field value +func (o *UserPropertiesPut) SetAdministrator(v bool) { - o.Lastname = &v + o.Administrator = &v } -// HasLastname returns a boolean if a field has been set. -func (o *UserPropertiesPut) HasLastname() bool { - if o != nil && o.Lastname != nil { +// HasAdministrator returns a boolean if a field has been set. +func (o *UserPropertiesPut) HasAdministrator() bool { + if o != nil && o.Administrator != nil { return true } @@ -129,7 +129,7 @@ func (o *UserPropertiesPut) HasLastname() bool { } // GetEmail returns the Email field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *UserPropertiesPut) GetEmail() *string { if o == nil { return nil @@ -166,190 +166,190 @@ func (o *UserPropertiesPut) HasEmail() bool { return false } -// GetPassword returns the Password field value -// If the value is explicit nil, the zero value for string will be returned -func (o *UserPropertiesPut) GetPassword() *string { +// GetFirstname returns the Firstname field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPut) GetFirstname() *string { if o == nil { return nil } - return o.Password + return o.Firstname } -// GetPasswordOk returns a tuple with the Password field value +// GetFirstnameOk returns a tuple with the Firstname field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPut) GetPasswordOk() (*string, bool) { +func (o *UserPropertiesPut) GetFirstnameOk() (*string, bool) { if o == nil { return nil, false } - return o.Password, true + return o.Firstname, true } -// SetPassword sets field value -func (o *UserPropertiesPut) SetPassword(v string) { +// SetFirstname sets field value +func (o *UserPropertiesPut) SetFirstname(v string) { - o.Password = &v + o.Firstname = &v } -// HasPassword returns a boolean if a field has been set. -func (o *UserPropertiesPut) HasPassword() bool { - if o != nil && o.Password != nil { +// HasFirstname returns a boolean if a field has been set. +func (o *UserPropertiesPut) HasFirstname() bool { + if o != nil && o.Firstname != nil { return true } return false } -// GetAdministrator returns the Administrator field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserPropertiesPut) GetAdministrator() *bool { +// GetForceSecAuth returns the ForceSecAuth field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPut) GetForceSecAuth() *bool { if o == nil { return nil } - return o.Administrator + return o.ForceSecAuth } -// GetAdministratorOk returns a tuple with the Administrator field value +// GetForceSecAuthOk returns a tuple with the ForceSecAuth field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPut) GetAdministratorOk() (*bool, bool) { +func (o *UserPropertiesPut) GetForceSecAuthOk() (*bool, bool) { if o == nil { return nil, false } - return o.Administrator, true + return o.ForceSecAuth, true } -// SetAdministrator sets field value -func (o *UserPropertiesPut) SetAdministrator(v bool) { +// SetForceSecAuth sets field value +func (o *UserPropertiesPut) SetForceSecAuth(v bool) { - o.Administrator = &v + o.ForceSecAuth = &v } -// HasAdministrator returns a boolean if a field has been set. -func (o *UserPropertiesPut) HasAdministrator() bool { - if o != nil && o.Administrator != nil { +// HasForceSecAuth returns a boolean if a field has been set. +func (o *UserPropertiesPut) HasForceSecAuth() bool { + if o != nil && o.ForceSecAuth != nil { return true } return false } -// GetForceSecAuth returns the ForceSecAuth field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserPropertiesPut) GetForceSecAuth() *bool { +// GetLastname returns the Lastname field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPut) GetLastname() *string { if o == nil { return nil } - return o.ForceSecAuth + return o.Lastname } -// GetForceSecAuthOk returns a tuple with the ForceSecAuth field value +// GetLastnameOk returns a tuple with the Lastname field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPut) GetForceSecAuthOk() (*bool, bool) { +func (o *UserPropertiesPut) GetLastnameOk() (*string, bool) { if o == nil { return nil, false } - return o.ForceSecAuth, true + return o.Lastname, true } -// SetForceSecAuth sets field value -func (o *UserPropertiesPut) SetForceSecAuth(v bool) { +// SetLastname sets field value +func (o *UserPropertiesPut) SetLastname(v string) { - o.ForceSecAuth = &v + o.Lastname = &v } -// HasForceSecAuth returns a boolean if a field has been set. 
-func (o *UserPropertiesPut) HasForceSecAuth() bool { - if o != nil && o.ForceSecAuth != nil { +// HasLastname returns a boolean if a field has been set. +func (o *UserPropertiesPut) HasLastname() bool { + if o != nil && o.Lastname != nil { return true } return false } -// GetSecAuthActive returns the SecAuthActive field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserPropertiesPut) GetSecAuthActive() *bool { +// GetPassword returns the Password field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPut) GetPassword() *string { if o == nil { return nil } - return o.SecAuthActive + return o.Password } -// GetSecAuthActiveOk returns a tuple with the SecAuthActive field value +// GetPasswordOk returns a tuple with the Password field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPut) GetSecAuthActiveOk() (*bool, bool) { +func (o *UserPropertiesPut) GetPasswordOk() (*string, bool) { if o == nil { return nil, false } - return o.SecAuthActive, true + return o.Password, true } -// SetSecAuthActive sets field value -func (o *UserPropertiesPut) SetSecAuthActive(v bool) { +// SetPassword sets field value +func (o *UserPropertiesPut) SetPassword(v string) { - o.SecAuthActive = &v + o.Password = &v } -// HasSecAuthActive returns a boolean if a field has been set. -func (o *UserPropertiesPut) HasSecAuthActive() bool { - if o != nil && o.SecAuthActive != nil { +// HasPassword returns a boolean if a field has been set. +func (o *UserPropertiesPut) HasPassword() bool { + if o != nil && o.Password != nil { return true } return false } -// GetActive returns the Active field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *UserPropertiesPut) GetActive() *bool { +// GetSecAuthActive returns the SecAuthActive field value +// If the value is explicit nil, nil is returned +func (o *UserPropertiesPut) GetSecAuthActive() *bool { if o == nil { return nil } - return o.Active + return o.SecAuthActive } -// GetActiveOk returns a tuple with the Active field value +// GetSecAuthActiveOk returns a tuple with the SecAuthActive field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UserPropertiesPut) GetActiveOk() (*bool, bool) { +func (o *UserPropertiesPut) GetSecAuthActiveOk() (*bool, bool) { if o == nil { return nil, false } - return o.Active, true + return o.SecAuthActive, true } -// SetActive sets field value -func (o *UserPropertiesPut) SetActive(v bool) { +// SetSecAuthActive sets field value +func (o *UserPropertiesPut) SetSecAuthActive(v bool) { - o.Active = &v + o.SecAuthActive = &v } -// HasActive returns a boolean if a field has been set. -func (o *UserPropertiesPut) HasActive() bool { - if o != nil && o.Active != nil { +// HasSecAuthActive returns a boolean if a field has been set. 
+func (o *UserPropertiesPut) HasSecAuthActive() bool { + if o != nil && o.SecAuthActive != nil { return true } @@ -358,30 +358,38 @@ func (o *UserPropertiesPut) HasActive() bool { func (o UserPropertiesPut) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Firstname != nil { - toSerialize["firstname"] = o.Firstname + if o.Active != nil { + toSerialize["active"] = o.Active } - if o.Lastname != nil { - toSerialize["lastname"] = o.Lastname + + if o.Administrator != nil { + toSerialize["administrator"] = o.Administrator } + if o.Email != nil { toSerialize["email"] = o.Email } - if o.Password != nil { - toSerialize["password"] = o.Password - } - if o.Administrator != nil { - toSerialize["administrator"] = o.Administrator + + if o.Firstname != nil { + toSerialize["firstname"] = o.Firstname } + if o.ForceSecAuth != nil { toSerialize["forceSecAuth"] = o.ForceSecAuth } + + if o.Lastname != nil { + toSerialize["lastname"] = o.Lastname + } + + if o.Password != nil { + toSerialize["password"] = o.Password + } + if o.SecAuthActive != nil { toSerialize["secAuthActive"] = o.SecAuthActive } - if o.Active != nil { - toSerialize["active"] = o.Active - } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_put.go index b45261f2f..9e0c992c9 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_user_put.go @@ -42,7 +42,7 @@ func NewUserPutWithDefaults() *UserPut { } // GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned +// If the value is explicit nil, nil is returned func (o *UserPut) GetId() *string { if o == nil { return nil @@ -80,7 +80,7 @@ func (o *UserPut) HasId() bool { } // GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for UserPropertiesPut will be returned +// If the value is explicit nil, nil is returned func (o *UserPut) GetProperties() *UserPropertiesPut { if o == nil { return nil @@ -122,9 +122,11 @@ func (o UserPut) MarshalJSON() ([]byte, error) { if o.Id != nil { toSerialize["id"] = o.Id } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_users.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_users.go index 0107d029b..e67610e19 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_users.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_users.go @@ -16,19 +16,19 @@ import ( // Users struct for Users type Users struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]User `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewUsers instantiates a new Users object @@ -49,114 +49,114 @@ func NewUsersWithDefaults() *Users { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Users) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *Users) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Users) GetIdOk() (*string, bool) { +func (o *Users) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *Users) SetId(v string) { +// SetLinks sets field value +func (o *Users) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *Users) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *Users) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Users) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Users) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Users) GetTypeOk() (*Type, bool) { +func (o *Users) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Users) SetType(v Type) { +// SetHref sets field value +func (o *Users) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Users) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. +func (o *Users) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Users) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Users) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Users) GetHrefOk() (*string, bool) { +func (o *Users) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Users) SetHref(v string) { +// SetId sets field value +func (o *Users) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. 
-func (o *Users) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Users) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *Users) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []User will be returned +// If the value is explicit nil, nil is returned func (o *Users) GetItems() *[]User { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *Users) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Users) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *Users) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Users) GetOffsetOk() (*float32, bool) { +func (o *Users) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *Users) SetOffset(v float32) { +// SetLimit sets field value +func (o *Users) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *Users) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *Users) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Users) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *Users) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Users) GetLimitOk() (*float32, bool) { +func (o *Users) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *Users) SetLimit(v float32) { +// SetOffset sets field value +func (o *Users) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *Users) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. 
+func (o *Users) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *Users) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Users) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Users) GetLinksOk() (*PaginationLinks, bool) { +func (o *Users) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *Users) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *Users) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *Users) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *Users) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *Users) HasLinks() bool { func (o Users) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_users_entities.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_users_entities.go index 6c10d5a04..7619f6611 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_users_entities.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_users_entities.go @@ -16,8 +16,8 @@ import ( // UsersEntities struct for UsersEntities type UsersEntities struct { - Owns *ResourcesUsers `json:"owns,omitempty"` Groups *GroupUsers `json:"groups,omitempty"` + Owns *ResourcesUsers `json:"owns,omitempty"` } // NewUsersEntities instantiates a new UsersEntities object @@ -38,76 +38,76 @@ func NewUsersEntitiesWithDefaults() *UsersEntities { return &this } -// GetOwns returns the Owns field value -// If the value is explicit nil, the zero value for ResourcesUsers will be returned -func (o *UsersEntities) GetOwns() *ResourcesUsers { +// GetGroups returns the Groups field value +// If the value is explicit nil, nil is returned +func (o *UsersEntities) GetGroups() *GroupUsers { if o == nil { return nil } - return o.Owns + return o.Groups } -// GetOwnsOk returns a tuple with the Owns field value +// GetGroupsOk returns a tuple with the Groups field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UsersEntities) GetOwnsOk() (*ResourcesUsers, bool) { +func (o *UsersEntities) GetGroupsOk() (*GroupUsers, bool) { if o == nil { return nil, false } - return o.Owns, true + return o.Groups, true } -// SetOwns sets field value -func (o *UsersEntities) SetOwns(v ResourcesUsers) { +// SetGroups sets field value +func (o *UsersEntities) SetGroups(v GroupUsers) { - o.Owns = &v + o.Groups = &v } -// HasOwns returns a boolean if a field has been set. -func (o *UsersEntities) HasOwns() bool { - if o != nil && o.Owns != nil { +// HasGroups returns a boolean if a field has been set. +func (o *UsersEntities) HasGroups() bool { + if o != nil && o.Groups != nil { return true } return false } -// GetGroups returns the Groups field value -// If the value is explicit nil, the zero value for GroupUsers will be returned -func (o *UsersEntities) GetGroups() *GroupUsers { +// GetOwns returns the Owns field value +// If the value is explicit nil, nil is returned +func (o *UsersEntities) GetOwns() *ResourcesUsers { if o == nil { return nil } - return o.Groups + return o.Owns } -// GetGroupsOk returns a tuple with the Groups field value +// GetOwnsOk returns a tuple with the Owns field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *UsersEntities) GetGroupsOk() (*GroupUsers, bool) { +func (o *UsersEntities) GetOwnsOk() (*ResourcesUsers, bool) { if o == nil { return nil, false } - return o.Groups, true + return o.Owns, true } -// SetGroups sets field value -func (o *UsersEntities) SetGroups(v GroupUsers) { +// SetOwns sets field value +func (o *UsersEntities) SetOwns(v ResourcesUsers) { - o.Groups = &v + o.Owns = &v } -// HasGroups returns a boolean if a field has been set. -func (o *UsersEntities) HasGroups() bool { - if o != nil && o.Groups != nil { +// HasOwns returns a boolean if a field has been set. +func (o *UsersEntities) HasOwns() bool { + if o != nil && o.Owns != nil { return true } @@ -116,12 +116,14 @@ func (o *UsersEntities) HasGroups() bool { func (o UsersEntities) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Owns != nil { - toSerialize["owns"] = o.Owns - } if o.Groups != nil { toSerialize["groups"] = o.Groups } + + if o.Owns != nil { + toSerialize["owns"] = o.Owns + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume.go index d0b7b862e..59bf03f02 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume.go @@ -16,14 +16,14 @@ import ( // Volume struct for Volume type Volume struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` // The URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` + Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *VolumeProperties `json:"properties"` + // The type of object that has been created. 
+ Type *Type `json:"type,omitempty"` } // NewVolume instantiates a new Volume object @@ -46,190 +46,190 @@ func NewVolumeWithDefaults() *Volume { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Volume) GetId() *string { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Volume) GetHref() *string { if o == nil { return nil } - return o.Id + return o.Href } -// GetIdOk returns a tuple with the Id field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volume) GetIdOk() (*string, bool) { +func (o *Volume) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Href, true } -// SetId sets field value -func (o *Volume) SetId(v string) { +// SetHref sets field value +func (o *Volume) SetHref(v string) { - o.Id = &v + o.Href = &v } -// HasId returns a boolean if a field has been set. -func (o *Volume) HasId() bool { - if o != nil && o.Id != nil { +// HasHref returns a boolean if a field has been set. +func (o *Volume) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Volume) GetType() *Type { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Volume) GetId() *string { if o == nil { return nil } - return o.Type + return o.Id } -// GetTypeOk returns a tuple with the Type field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volume) GetTypeOk() (*Type, bool) { +func (o *Volume) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Id, true } -// SetType sets field value -func (o *Volume) SetType(v Type) { +// SetId sets field value +func (o *Volume) SetId(v string) { - o.Type = &v + o.Id = &v } -// HasType returns a boolean if a field has been set. -func (o *Volume) HasType() bool { - if o != nil && o.Type != nil { +// HasId returns a boolean if a field has been set. +func (o *Volume) HasId() bool { + if o != nil && o.Id != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Volume) GetHref() *string { +// GetMetadata returns the Metadata field value +// If the value is explicit nil, nil is returned +func (o *Volume) GetMetadata() *DatacenterElementMetadata { if o == nil { return nil } - return o.Href + return o.Metadata } -// GetHrefOk returns a tuple with the Href field value +// GetMetadataOk returns a tuple with the Metadata field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volume) GetHrefOk() (*string, bool) { +func (o *Volume) GetMetadataOk() (*DatacenterElementMetadata, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Metadata, true } -// SetHref sets field value -func (o *Volume) SetHref(v string) { +// SetMetadata sets field value +func (o *Volume) SetMetadata(v DatacenterElementMetadata) { - o.Href = &v + o.Metadata = &v } -// HasHref returns a boolean if a field has been set. -func (o *Volume) HasHref() bool { - if o != nil && o.Href != nil { +// HasMetadata returns a boolean if a field has been set. +func (o *Volume) HasMetadata() bool { + if o != nil && o.Metadata != nil { return true } return false } -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *Volume) GetMetadata() *DatacenterElementMetadata { +// GetProperties returns the Properties field value +// If the value is explicit nil, nil is returned +func (o *Volume) GetProperties() *VolumeProperties { if o == nil { return nil } - return o.Metadata + return o.Properties } -// GetMetadataOk returns a tuple with the Metadata field value +// GetPropertiesOk returns a tuple with the Properties field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volume) GetMetadataOk() (*DatacenterElementMetadata, bool) { +func (o *Volume) GetPropertiesOk() (*VolumeProperties, bool) { if o == nil { return nil, false } - return o.Metadata, true + return o.Properties, true } -// SetMetadata sets field value -func (o *Volume) SetMetadata(v DatacenterElementMetadata) { +// SetProperties sets field value +func (o *Volume) SetProperties(v VolumeProperties) { - o.Metadata = &v + o.Properties = &v } -// HasMetadata returns a boolean if a field has been set. -func (o *Volume) HasMetadata() bool { - if o != nil && o.Metadata != nil { +// HasProperties returns a boolean if a field has been set. +func (o *Volume) HasProperties() bool { + if o != nil && o.Properties != nil { return true } return false } -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for VolumeProperties will be returned -func (o *Volume) GetProperties() *VolumeProperties { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Volume) GetType() *Type { if o == nil { return nil } - return o.Properties + return o.Type } -// GetPropertiesOk returns a tuple with the Properties field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volume) GetPropertiesOk() (*VolumeProperties, bool) { +func (o *Volume) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Properties, true + return o.Type, true } -// SetProperties sets field value -func (o *Volume) SetProperties(v VolumeProperties) { +// SetType sets field value +func (o *Volume) SetType(v Type) { - o.Properties = &v + o.Type = &v } -// HasProperties returns a boolean if a field has been set. -func (o *Volume) HasProperties() bool { - if o != nil && o.Properties != nil { +// HasType returns a boolean if a field has been set. 
+func (o *Volume) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -238,21 +238,26 @@ func (o *Volume) HasProperties() bool { func (o Volume) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Metadata != nil { toSerialize["metadata"] = o.Metadata } + if o.Properties != nil { toSerialize["properties"] = o.Properties } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume_properties.go index 15ed6a148..6f6bc6b96 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume_properties.go @@ -16,49 +16,50 @@ import ( // VolumeProperties struct for VolumeProperties type VolumeProperties struct { - // The name of the resource. - Name *string `json:"name,omitempty"` - // Hardware type of the volume. DAS (Direct Attached Storage) could be used only in a composite call with a Cube server. - Type *string `json:"type,omitempty"` - // The size of the volume in GB. - Size *float32 `json:"size"` // The availability zone in which the volume should be provisioned. The storage volume will be provisioned on as few physical storage devices as possible, but this cannot be guaranteed upfront. This is uavailable for DAS (Direct Attached Storage), and subject to availability for SSD. AvailabilityZone *string `json:"availabilityZone,omitempty"` + // The ID of the backup unit that the user has access to. The property is immutable and is only allowed to be set on creation of a new a volume. It is mandatory to provide either 'public image' or 'imageAlias' in conjunction with this property. + BackupunitId *string `json:"backupunitId,omitempty"` + // Determines whether the volume will be used as a boot volume. Set to `NONE`, the volume will not be used as boot volume. Set to `PRIMARY`, the volume will be used as boot volume and all other volumes must be set to `NONE`. Set to `AUTO` or `null` requires all volumes to be set to `AUTO` or `null`; this will use the legacy behavior, which is to use the volume as a boot volume only if there are no other volumes or cdrom devices. + // to set this field to `nil` in order to be marshalled, the explicit nil address `Nilstring` can be used, or the setter `SetBootOrderNil` + BootOrder *string `json:"bootOrder,omitempty"` + // The UUID of the attached server. + BootServer *string `json:"bootServer,omitempty"` + // The bus type for this volume; default is VIRTIO. + Bus *string `json:"bus,omitempty"` + // Hot-plug capable CPU (no reboot required). + CpuHotPlug *bool `json:"cpuHotPlug,omitempty"` + // The Logical Unit Number of the storage volume. Null for volumes, not mounted to a VM. + DeviceNumber *int64 `json:"deviceNumber,omitempty"` + // Hot-plug capable Virt-IO drive (no reboot required). + DiscVirtioHotPlug *bool `json:"discVirtioHotPlug,omitempty"` + // Hot-unplug capable Virt-IO drive (no reboot required). Not supported with Windows VMs. + DiscVirtioHotUnplug *bool `json:"discVirtioHotUnplug,omitempty"` // Image or snapshot ID to be used as template for this volume. 
- Image *string `json:"image,omitempty"` + Image *string `json:"image,omitempty"` + ImageAlias *string `json:"imageAlias,omitempty"` // Initial password to be set for installed OS. Works with public images only. Not modifiable, forbidden in update requests. Password rules allows all characters from a-z, A-Z, 0-9. ImagePassword *string `json:"imagePassword,omitempty"` - ImageAlias *string `json:"imageAlias,omitempty"` - // Public SSH keys are set on the image as authorized keys for appropriate SSH login to the instance using the corresponding private key. This field may only be set in creation requests. When reading, it always returns null. SSH keys are only supported if a public Linux image is used for the volume creation. - SshKeys *[]string `json:"sshKeys,omitempty"` - // The bus type for this volume; default is VIRTIO. - Bus *string `json:"bus,omitempty"` // OS type for this volume. LicenceType *string `json:"licenceType,omitempty"` - // Hot-plug capable CPU (no reboot required). - CpuHotPlug *bool `json:"cpuHotPlug,omitempty"` - // Hot-plug capable RAM (no reboot required). - RamHotPlug *bool `json:"ramHotPlug,omitempty"` + // The name of the resource. + Name *string `json:"name,omitempty"` // Hot-plug capable NIC (no reboot required). NicHotPlug *bool `json:"nicHotPlug,omitempty"` // Hot-unplug capable NIC (no reboot required). NicHotUnplug *bool `json:"nicHotUnplug,omitempty"` - // Hot-plug capable Virt-IO drive (no reboot required). - DiscVirtioHotPlug *bool `json:"discVirtioHotPlug,omitempty"` - // Hot-unplug capable Virt-IO drive (no reboot required). Not supported with Windows VMs. - DiscVirtioHotUnplug *bool `json:"discVirtioHotUnplug,omitempty"` - // The Logical Unit Number of the storage volume. Null for volumes, not mounted to a VM. - DeviceNumber *int64 `json:"deviceNumber,omitempty"` // The PCI slot number of the storage volume. Null for volumes, not mounted to a VM. PciSlot *int32 `json:"pciSlot,omitempty"` - // The ID of the backup unit that the user has access to. The property is immutable and is only allowed to be set on creation of a new a volume. It is mandatory to provide either 'public image' or 'imageAlias' in conjunction with this property. - BackupunitId *string `json:"backupunitId,omitempty"` + // Hot-plug capable RAM (no reboot required). + RamHotPlug *bool `json:"ramHotPlug,omitempty"` + // The size of the volume in GB. + Size *float32 `json:"size"` + // Public SSH keys are set on the image as authorized keys for appropriate SSH login to the instance using the corresponding private key. This field may only be set in creation requests. When reading, it always returns null. SSH keys are only supported if a public Linux image is used for the volume creation. + SshKeys *[]string `json:"sshKeys,omitempty"` + // Hardware type of the volume. DAS (Direct Attached Storage) could be used only in a composite call with a Cube server. + Type *string `json:"type,omitempty"` // The cloud-init configuration for the volume as base64 encoded string. The property is immutable and is only allowed to be set on creation of a new a volume. It is mandatory to provide either 'public image' or 'imageAlias' that has cloud-init compatibility in conjunction with this property. UserData *string `json:"userData,omitempty"` - // The UUID of the attached server. - BootServer *string `json:"bootServer,omitempty"` - // Determines whether the volume will be used as a boot volume. Set to `NONE`, the volume will not be used as boot volume. 
Set to `PRIMARY`, the volume will be used as boot volume and all other volumes must be set to `NONE`. Set to `AUTO` or `null` requires all volumes to be set to `AUTO` or `null`; this will use the legacy behavior, which is to use the volume as a boot volume only if there are no other volumes or cdrom devices. - BootOrder *string `json:"bootOrder,omitempty"` } // NewVolumeProperties instantiates a new VolumeProperties object @@ -68,9 +69,9 @@ type VolumeProperties struct { func NewVolumeProperties(size float32) *VolumeProperties { this := VolumeProperties{} - this.Size = &size var bootOrder = "AUTO" this.BootOrder = &bootOrder + this.Size = &size return &this } @@ -85,836 +86,841 @@ func NewVolumePropertiesWithDefaults() *VolumeProperties { return &this } -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetName() *string { +// GetAvailabilityZone returns the AvailabilityZone field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetAvailabilityZone() *string { if o == nil { return nil } - return o.Name + return o.AvailabilityZone } -// GetNameOk returns a tuple with the Name field value +// GetAvailabilityZoneOk returns a tuple with the AvailabilityZone field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetNameOk() (*string, bool) { +func (o *VolumeProperties) GetAvailabilityZoneOk() (*string, bool) { if o == nil { return nil, false } - return o.Name, true + return o.AvailabilityZone, true } -// SetName sets field value -func (o *VolumeProperties) SetName(v string) { +// SetAvailabilityZone sets field value +func (o *VolumeProperties) SetAvailabilityZone(v string) { - o.Name = &v + o.AvailabilityZone = &v } -// HasName returns a boolean if a field has been set. -func (o *VolumeProperties) HasName() bool { - if o != nil && o.Name != nil { +// HasAvailabilityZone returns a boolean if a field has been set. +func (o *VolumeProperties) HasAvailabilityZone() bool { + if o != nil && o.AvailabilityZone != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetType() *string { +// GetBackupunitId returns the BackupunitId field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetBackupunitId() *string { if o == nil { return nil } - return o.Type + return o.BackupunitId } -// GetTypeOk returns a tuple with the Type field value +// GetBackupunitIdOk returns a tuple with the BackupunitId field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetTypeOk() (*string, bool) { +func (o *VolumeProperties) GetBackupunitIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.BackupunitId, true } -// SetType sets field value -func (o *VolumeProperties) SetType(v string) { +// SetBackupunitId sets field value +func (o *VolumeProperties) SetBackupunitId(v string) { - o.Type = &v + o.BackupunitId = &v } -// HasType returns a boolean if a field has been set. -func (o *VolumeProperties) HasType() bool { - if o != nil && o.Type != nil { +// HasBackupunitId returns a boolean if a field has been set. 
+func (o *VolumeProperties) HasBackupunitId() bool { + if o != nil && o.BackupunitId != nil { return true } return false } -// GetSize returns the Size field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *VolumeProperties) GetSize() *float32 { +// GetBootOrder returns the BootOrder field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetBootOrder() *string { if o == nil { return nil } - return o.Size + return o.BootOrder } -// GetSizeOk returns a tuple with the Size field value +// GetBootOrderOk returns a tuple with the BootOrder field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetSizeOk() (*float32, bool) { +func (o *VolumeProperties) GetBootOrderOk() (*string, bool) { if o == nil { return nil, false } - return o.Size, true + return o.BootOrder, true } -// SetSize sets field value -func (o *VolumeProperties) SetSize(v float32) { +// SetBootOrder sets field value +func (o *VolumeProperties) SetBootOrder(v string) { - o.Size = &v + o.BootOrder = &v } -// HasSize returns a boolean if a field has been set. -func (o *VolumeProperties) HasSize() bool { - if o != nil && o.Size != nil { +// sets BootOrder to the explicit address that will be encoded as nil when marshaled +func (o *VolumeProperties) SetBootOrderNil() { + o.BootOrder = &Nilstring +} + +// HasBootOrder returns a boolean if a field has been set. +func (o *VolumeProperties) HasBootOrder() bool { + if o != nil && o.BootOrder != nil { return true } return false } -// GetAvailabilityZone returns the AvailabilityZone field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetAvailabilityZone() *string { +// GetBootServer returns the BootServer field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetBootServer() *string { if o == nil { return nil } - return o.AvailabilityZone + return o.BootServer } -// GetAvailabilityZoneOk returns a tuple with the AvailabilityZone field value +// GetBootServerOk returns a tuple with the BootServer field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetAvailabilityZoneOk() (*string, bool) { +func (o *VolumeProperties) GetBootServerOk() (*string, bool) { if o == nil { return nil, false } - return o.AvailabilityZone, true + return o.BootServer, true } -// SetAvailabilityZone sets field value -func (o *VolumeProperties) SetAvailabilityZone(v string) { +// SetBootServer sets field value +func (o *VolumeProperties) SetBootServer(v string) { - o.AvailabilityZone = &v + o.BootServer = &v } -// HasAvailabilityZone returns a boolean if a field has been set. -func (o *VolumeProperties) HasAvailabilityZone() bool { - if o != nil && o.AvailabilityZone != nil { +// HasBootServer returns a boolean if a field has been set. 
+func (o *VolumeProperties) HasBootServer() bool { + if o != nil && o.BootServer != nil { return true } return false } -// GetImage returns the Image field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetImage() *string { +// GetBus returns the Bus field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetBus() *string { if o == nil { return nil } - return o.Image + return o.Bus } -// GetImageOk returns a tuple with the Image field value +// GetBusOk returns a tuple with the Bus field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetImageOk() (*string, bool) { +func (o *VolumeProperties) GetBusOk() (*string, bool) { if o == nil { return nil, false } - return o.Image, true + return o.Bus, true } -// SetImage sets field value -func (o *VolumeProperties) SetImage(v string) { +// SetBus sets field value +func (o *VolumeProperties) SetBus(v string) { - o.Image = &v + o.Bus = &v } -// HasImage returns a boolean if a field has been set. -func (o *VolumeProperties) HasImage() bool { - if o != nil && o.Image != nil { +// HasBus returns a boolean if a field has been set. +func (o *VolumeProperties) HasBus() bool { + if o != nil && o.Bus != nil { return true } return false } -// GetImagePassword returns the ImagePassword field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetImagePassword() *string { +// GetCpuHotPlug returns the CpuHotPlug field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetCpuHotPlug() *bool { if o == nil { return nil } - return o.ImagePassword + return o.CpuHotPlug } -// GetImagePasswordOk returns a tuple with the ImagePassword field value +// GetCpuHotPlugOk returns a tuple with the CpuHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetImagePasswordOk() (*string, bool) { +func (o *VolumeProperties) GetCpuHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.ImagePassword, true + return o.CpuHotPlug, true } -// SetImagePassword sets field value -func (o *VolumeProperties) SetImagePassword(v string) { +// SetCpuHotPlug sets field value +func (o *VolumeProperties) SetCpuHotPlug(v bool) { - o.ImagePassword = &v + o.CpuHotPlug = &v } -// HasImagePassword returns a boolean if a field has been set. -func (o *VolumeProperties) HasImagePassword() bool { - if o != nil && o.ImagePassword != nil { +// HasCpuHotPlug returns a boolean if a field has been set. +func (o *VolumeProperties) HasCpuHotPlug() bool { + if o != nil && o.CpuHotPlug != nil { return true } return false } -// GetImageAlias returns the ImageAlias field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetImageAlias() *string { +// GetDeviceNumber returns the DeviceNumber field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetDeviceNumber() *int64 { if o == nil { return nil } - return o.ImageAlias + return o.DeviceNumber } -// GetImageAliasOk returns a tuple with the ImageAlias field value +// GetDeviceNumberOk returns a tuple with the DeviceNumber field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetImageAliasOk() (*string, bool) { +func (o *VolumeProperties) GetDeviceNumberOk() (*int64, bool) { if o == nil { return nil, false } - return o.ImageAlias, true + return o.DeviceNumber, true } -// SetImageAlias sets field value -func (o *VolumeProperties) SetImageAlias(v string) { +// SetDeviceNumber sets field value +func (o *VolumeProperties) SetDeviceNumber(v int64) { - o.ImageAlias = &v + o.DeviceNumber = &v } -// HasImageAlias returns a boolean if a field has been set. -func (o *VolumeProperties) HasImageAlias() bool { - if o != nil && o.ImageAlias != nil { +// HasDeviceNumber returns a boolean if a field has been set. +func (o *VolumeProperties) HasDeviceNumber() bool { + if o != nil && o.DeviceNumber != nil { return true } return false } -// GetSshKeys returns the SshKeys field value -// If the value is explicit nil, the zero value for []string will be returned -func (o *VolumeProperties) GetSshKeys() *[]string { +// GetDiscVirtioHotPlug returns the DiscVirtioHotPlug field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetDiscVirtioHotPlug() *bool { if o == nil { return nil } - return o.SshKeys + return o.DiscVirtioHotPlug } -// GetSshKeysOk returns a tuple with the SshKeys field value +// GetDiscVirtioHotPlugOk returns a tuple with the DiscVirtioHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetSshKeysOk() (*[]string, bool) { +func (o *VolumeProperties) GetDiscVirtioHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.SshKeys, true + return o.DiscVirtioHotPlug, true } -// SetSshKeys sets field value -func (o *VolumeProperties) SetSshKeys(v []string) { +// SetDiscVirtioHotPlug sets field value +func (o *VolumeProperties) SetDiscVirtioHotPlug(v bool) { - o.SshKeys = &v + o.DiscVirtioHotPlug = &v } -// HasSshKeys returns a boolean if a field has been set. -func (o *VolumeProperties) HasSshKeys() bool { - if o != nil && o.SshKeys != nil { +// HasDiscVirtioHotPlug returns a boolean if a field has been set. +func (o *VolumeProperties) HasDiscVirtioHotPlug() bool { + if o != nil && o.DiscVirtioHotPlug != nil { return true } return false } -// GetBus returns the Bus field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetBus() *string { +// GetDiscVirtioHotUnplug returns the DiscVirtioHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetDiscVirtioHotUnplug() *bool { if o == nil { return nil } - return o.Bus + return o.DiscVirtioHotUnplug } -// GetBusOk returns a tuple with the Bus field value +// GetDiscVirtioHotUnplugOk returns a tuple with the DiscVirtioHotUnplug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetBusOk() (*string, bool) { +func (o *VolumeProperties) GetDiscVirtioHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.Bus, true + return o.DiscVirtioHotUnplug, true } -// SetBus sets field value -func (o *VolumeProperties) SetBus(v string) { +// SetDiscVirtioHotUnplug sets field value +func (o *VolumeProperties) SetDiscVirtioHotUnplug(v bool) { - o.Bus = &v + o.DiscVirtioHotUnplug = &v } -// HasBus returns a boolean if a field has been set. 
-func (o *VolumeProperties) HasBus() bool { - if o != nil && o.Bus != nil { +// HasDiscVirtioHotUnplug returns a boolean if a field has been set. +func (o *VolumeProperties) HasDiscVirtioHotUnplug() bool { + if o != nil && o.DiscVirtioHotUnplug != nil { return true } return false } -// GetLicenceType returns the LicenceType field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetLicenceType() *string { +// GetImage returns the Image field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetImage() *string { if o == nil { return nil } - return o.LicenceType + return o.Image } -// GetLicenceTypeOk returns a tuple with the LicenceType field value +// GetImageOk returns a tuple with the Image field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetLicenceTypeOk() (*string, bool) { +func (o *VolumeProperties) GetImageOk() (*string, bool) { if o == nil { return nil, false } - return o.LicenceType, true + return o.Image, true } -// SetLicenceType sets field value -func (o *VolumeProperties) SetLicenceType(v string) { +// SetImage sets field value +func (o *VolumeProperties) SetImage(v string) { - o.LicenceType = &v + o.Image = &v } -// HasLicenceType returns a boolean if a field has been set. -func (o *VolumeProperties) HasLicenceType() bool { - if o != nil && o.LicenceType != nil { +// HasImage returns a boolean if a field has been set. +func (o *VolumeProperties) HasImage() bool { + if o != nil && o.Image != nil { return true } return false } -// GetCpuHotPlug returns the CpuHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *VolumeProperties) GetCpuHotPlug() *bool { +// GetImageAlias returns the ImageAlias field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetImageAlias() *string { if o == nil { return nil } - return o.CpuHotPlug + return o.ImageAlias } -// GetCpuHotPlugOk returns a tuple with the CpuHotPlug field value +// GetImageAliasOk returns a tuple with the ImageAlias field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetCpuHotPlugOk() (*bool, bool) { +func (o *VolumeProperties) GetImageAliasOk() (*string, bool) { if o == nil { return nil, false } - return o.CpuHotPlug, true + return o.ImageAlias, true } -// SetCpuHotPlug sets field value -func (o *VolumeProperties) SetCpuHotPlug(v bool) { +// SetImageAlias sets field value +func (o *VolumeProperties) SetImageAlias(v string) { - o.CpuHotPlug = &v + o.ImageAlias = &v } -// HasCpuHotPlug returns a boolean if a field has been set. -func (o *VolumeProperties) HasCpuHotPlug() bool { - if o != nil && o.CpuHotPlug != nil { +// HasImageAlias returns a boolean if a field has been set. 
+func (o *VolumeProperties) HasImageAlias() bool { + if o != nil && o.ImageAlias != nil { return true } return false } -// GetRamHotPlug returns the RamHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *VolumeProperties) GetRamHotPlug() *bool { +// GetImagePassword returns the ImagePassword field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetImagePassword() *string { if o == nil { return nil } - return o.RamHotPlug + return o.ImagePassword } -// GetRamHotPlugOk returns a tuple with the RamHotPlug field value +// GetImagePasswordOk returns a tuple with the ImagePassword field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetRamHotPlugOk() (*bool, bool) { +func (o *VolumeProperties) GetImagePasswordOk() (*string, bool) { if o == nil { return nil, false } - return o.RamHotPlug, true + return o.ImagePassword, true } -// SetRamHotPlug sets field value -func (o *VolumeProperties) SetRamHotPlug(v bool) { +// SetImagePassword sets field value +func (o *VolumeProperties) SetImagePassword(v string) { - o.RamHotPlug = &v + o.ImagePassword = &v } -// HasRamHotPlug returns a boolean if a field has been set. -func (o *VolumeProperties) HasRamHotPlug() bool { - if o != nil && o.RamHotPlug != nil { +// HasImagePassword returns a boolean if a field has been set. +func (o *VolumeProperties) HasImagePassword() bool { + if o != nil && o.ImagePassword != nil { return true } return false } -// GetNicHotPlug returns the NicHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *VolumeProperties) GetNicHotPlug() *bool { +// GetLicenceType returns the LicenceType field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetLicenceType() *string { if o == nil { return nil } - return o.NicHotPlug + return o.LicenceType } -// GetNicHotPlugOk returns a tuple with the NicHotPlug field value +// GetLicenceTypeOk returns a tuple with the LicenceType field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetNicHotPlugOk() (*bool, bool) { +func (o *VolumeProperties) GetLicenceTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.NicHotPlug, true + return o.LicenceType, true } -// SetNicHotPlug sets field value -func (o *VolumeProperties) SetNicHotPlug(v bool) { +// SetLicenceType sets field value +func (o *VolumeProperties) SetLicenceType(v string) { - o.NicHotPlug = &v + o.LicenceType = &v } -// HasNicHotPlug returns a boolean if a field has been set. -func (o *VolumeProperties) HasNicHotPlug() bool { - if o != nil && o.NicHotPlug != nil { +// HasLicenceType returns a boolean if a field has been set. 
+func (o *VolumeProperties) HasLicenceType() bool { + if o != nil && o.LicenceType != nil { return true } return false } -// GetNicHotUnplug returns the NicHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *VolumeProperties) GetNicHotUnplug() *bool { +// GetName returns the Name field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetName() *string { if o == nil { return nil } - return o.NicHotUnplug + return o.Name } -// GetNicHotUnplugOk returns a tuple with the NicHotUnplug field value +// GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetNicHotUnplugOk() (*bool, bool) { +func (o *VolumeProperties) GetNameOk() (*string, bool) { if o == nil { return nil, false } - return o.NicHotUnplug, true + return o.Name, true } -// SetNicHotUnplug sets field value -func (o *VolumeProperties) SetNicHotUnplug(v bool) { +// SetName sets field value +func (o *VolumeProperties) SetName(v string) { - o.NicHotUnplug = &v + o.Name = &v } -// HasNicHotUnplug returns a boolean if a field has been set. -func (o *VolumeProperties) HasNicHotUnplug() bool { - if o != nil && o.NicHotUnplug != nil { +// HasName returns a boolean if a field has been set. +func (o *VolumeProperties) HasName() bool { + if o != nil && o.Name != nil { return true } return false } -// GetDiscVirtioHotPlug returns the DiscVirtioHotPlug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *VolumeProperties) GetDiscVirtioHotPlug() *bool { +// GetNicHotPlug returns the NicHotPlug field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetNicHotPlug() *bool { if o == nil { return nil } - return o.DiscVirtioHotPlug + return o.NicHotPlug } -// GetDiscVirtioHotPlugOk returns a tuple with the DiscVirtioHotPlug field value +// GetNicHotPlugOk returns a tuple with the NicHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetDiscVirtioHotPlugOk() (*bool, bool) { +func (o *VolumeProperties) GetNicHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.DiscVirtioHotPlug, true + return o.NicHotPlug, true } -// SetDiscVirtioHotPlug sets field value -func (o *VolumeProperties) SetDiscVirtioHotPlug(v bool) { +// SetNicHotPlug sets field value +func (o *VolumeProperties) SetNicHotPlug(v bool) { - o.DiscVirtioHotPlug = &v + o.NicHotPlug = &v } -// HasDiscVirtioHotPlug returns a boolean if a field has been set. -func (o *VolumeProperties) HasDiscVirtioHotPlug() bool { - if o != nil && o.DiscVirtioHotPlug != nil { +// HasNicHotPlug returns a boolean if a field has been set. 
+func (o *VolumeProperties) HasNicHotPlug() bool { + if o != nil && o.NicHotPlug != nil { return true } return false } -// GetDiscVirtioHotUnplug returns the DiscVirtioHotUnplug field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *VolumeProperties) GetDiscVirtioHotUnplug() *bool { +// GetNicHotUnplug returns the NicHotUnplug field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetNicHotUnplug() *bool { if o == nil { return nil } - return o.DiscVirtioHotUnplug + return o.NicHotUnplug } -// GetDiscVirtioHotUnplugOk returns a tuple with the DiscVirtioHotUnplug field value +// GetNicHotUnplugOk returns a tuple with the NicHotUnplug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetDiscVirtioHotUnplugOk() (*bool, bool) { +func (o *VolumeProperties) GetNicHotUnplugOk() (*bool, bool) { if o == nil { return nil, false } - return o.DiscVirtioHotUnplug, true + return o.NicHotUnplug, true } -// SetDiscVirtioHotUnplug sets field value -func (o *VolumeProperties) SetDiscVirtioHotUnplug(v bool) { +// SetNicHotUnplug sets field value +func (o *VolumeProperties) SetNicHotUnplug(v bool) { - o.DiscVirtioHotUnplug = &v + o.NicHotUnplug = &v } -// HasDiscVirtioHotUnplug returns a boolean if a field has been set. -func (o *VolumeProperties) HasDiscVirtioHotUnplug() bool { - if o != nil && o.DiscVirtioHotUnplug != nil { +// HasNicHotUnplug returns a boolean if a field has been set. +func (o *VolumeProperties) HasNicHotUnplug() bool { + if o != nil && o.NicHotUnplug != nil { return true } return false } -// GetDeviceNumber returns the DeviceNumber field value -// If the value is explicit nil, the zero value for int64 will be returned -func (o *VolumeProperties) GetDeviceNumber() *int64 { +// GetPciSlot returns the PciSlot field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetPciSlot() *int32 { if o == nil { return nil } - return o.DeviceNumber + return o.PciSlot } -// GetDeviceNumberOk returns a tuple with the DeviceNumber field value +// GetPciSlotOk returns a tuple with the PciSlot field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetDeviceNumberOk() (*int64, bool) { +func (o *VolumeProperties) GetPciSlotOk() (*int32, bool) { if o == nil { return nil, false } - return o.DeviceNumber, true + return o.PciSlot, true } -// SetDeviceNumber sets field value -func (o *VolumeProperties) SetDeviceNumber(v int64) { +// SetPciSlot sets field value +func (o *VolumeProperties) SetPciSlot(v int32) { - o.DeviceNumber = &v + o.PciSlot = &v } -// HasDeviceNumber returns a boolean if a field has been set. -func (o *VolumeProperties) HasDeviceNumber() bool { - if o != nil && o.DeviceNumber != nil { +// HasPciSlot returns a boolean if a field has been set. 
+func (o *VolumeProperties) HasPciSlot() bool { + if o != nil && o.PciSlot != nil { return true } return false } -// GetPciSlot returns the PciSlot field value -// If the value is explicit nil, the zero value for int32 will be returned -func (o *VolumeProperties) GetPciSlot() *int32 { +// GetRamHotPlug returns the RamHotPlug field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetRamHotPlug() *bool { if o == nil { return nil } - return o.PciSlot + return o.RamHotPlug } -// GetPciSlotOk returns a tuple with the PciSlot field value +// GetRamHotPlugOk returns a tuple with the RamHotPlug field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetPciSlotOk() (*int32, bool) { +func (o *VolumeProperties) GetRamHotPlugOk() (*bool, bool) { if o == nil { return nil, false } - return o.PciSlot, true + return o.RamHotPlug, true } -// SetPciSlot sets field value -func (o *VolumeProperties) SetPciSlot(v int32) { +// SetRamHotPlug sets field value +func (o *VolumeProperties) SetRamHotPlug(v bool) { - o.PciSlot = &v + o.RamHotPlug = &v } -// HasPciSlot returns a boolean if a field has been set. -func (o *VolumeProperties) HasPciSlot() bool { - if o != nil && o.PciSlot != nil { +// HasRamHotPlug returns a boolean if a field has been set. +func (o *VolumeProperties) HasRamHotPlug() bool { + if o != nil && o.RamHotPlug != nil { return true } return false } -// GetBackupunitId returns the BackupunitId field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetBackupunitId() *string { +// GetSize returns the Size field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetSize() *float32 { if o == nil { return nil } - return o.BackupunitId + return o.Size } -// GetBackupunitIdOk returns a tuple with the BackupunitId field value +// GetSizeOk returns a tuple with the Size field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetBackupunitIdOk() (*string, bool) { +func (o *VolumeProperties) GetSizeOk() (*float32, bool) { if o == nil { return nil, false } - return o.BackupunitId, true + return o.Size, true } -// SetBackupunitId sets field value -func (o *VolumeProperties) SetBackupunitId(v string) { +// SetSize sets field value +func (o *VolumeProperties) SetSize(v float32) { - o.BackupunitId = &v + o.Size = &v } -// HasBackupunitId returns a boolean if a field has been set. -func (o *VolumeProperties) HasBackupunitId() bool { - if o != nil && o.BackupunitId != nil { +// HasSize returns a boolean if a field has been set. +func (o *VolumeProperties) HasSize() bool { + if o != nil && o.Size != nil { return true } return false } -// GetUserData returns the UserData field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetUserData() *string { +// GetSshKeys returns the SshKeys field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetSshKeys() *[]string { if o == nil { return nil } - return o.UserData + return o.SshKeys } -// GetUserDataOk returns a tuple with the UserData field value +// GetSshKeysOk returns a tuple with the SshKeys field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetUserDataOk() (*string, bool) { +func (o *VolumeProperties) GetSshKeysOk() (*[]string, bool) { if o == nil { return nil, false } - return o.UserData, true + return o.SshKeys, true } -// SetUserData sets field value -func (o *VolumeProperties) SetUserData(v string) { +// SetSshKeys sets field value +func (o *VolumeProperties) SetSshKeys(v []string) { - o.UserData = &v + o.SshKeys = &v } -// HasUserData returns a boolean if a field has been set. -func (o *VolumeProperties) HasUserData() bool { - if o != nil && o.UserData != nil { +// HasSshKeys returns a boolean if a field has been set. +func (o *VolumeProperties) HasSshKeys() bool { + if o != nil && o.SshKeys != nil { return true } return false } -// GetBootServer returns the BootServer field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetBootServer() *string { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetType() *string { if o == nil { return nil } - return o.BootServer + return o.Type } -// GetBootServerOk returns a tuple with the BootServer field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetBootServerOk() (*string, bool) { +func (o *VolumeProperties) GetTypeOk() (*string, bool) { if o == nil { return nil, false } - return o.BootServer, true + return o.Type, true } -// SetBootServer sets field value -func (o *VolumeProperties) SetBootServer(v string) { +// SetType sets field value +func (o *VolumeProperties) SetType(v string) { - o.BootServer = &v + o.Type = &v } -// HasBootServer returns a boolean if a field has been set. -func (o *VolumeProperties) HasBootServer() bool { - if o != nil && o.BootServer != nil { +// HasType returns a boolean if a field has been set. +func (o *VolumeProperties) HasType() bool { + if o != nil && o.Type != nil { return true } return false } -// GetBootOrder returns the BootOrder field value -// If the value is explicit nil, the zero value for string will be returned -func (o *VolumeProperties) GetBootOrder() *string { +// GetUserData returns the UserData field value +// If the value is explicit nil, nil is returned +func (o *VolumeProperties) GetUserData() *string { if o == nil { return nil } - return o.BootOrder + return o.UserData } -// GetBootOrderOk returns a tuple with the BootOrder field value +// GetUserDataOk returns a tuple with the UserData field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *VolumeProperties) GetBootOrderOk() (*string, bool) { +func (o *VolumeProperties) GetUserDataOk() (*string, bool) { if o == nil { return nil, false } - return o.BootOrder, true + return o.UserData, true } -// SetBootOrder sets field value -func (o *VolumeProperties) SetBootOrder(v string) { +// SetUserData sets field value +func (o *VolumeProperties) SetUserData(v string) { - o.BootOrder = &v + o.UserData = &v } -// HasBootOrder returns a boolean if a field has been set. -func (o *VolumeProperties) HasBootOrder() bool { - if o != nil && o.BootOrder != nil { +// HasUserData returns a boolean if a field has been set. 
+func (o *VolumeProperties) HasUserData() bool { + if o != nil && o.UserData != nil { return true } @@ -923,70 +929,95 @@ func (o *VolumeProperties) HasBootOrder() bool { func (o VolumeProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name + if o.AvailabilityZone != nil { + toSerialize["availabilityZone"] = o.AvailabilityZone } - if o.Type != nil { - toSerialize["type"] = o.Type + + if o.BackupunitId != nil { + toSerialize["backupunitId"] = o.BackupunitId } - if o.Size != nil { - toSerialize["size"] = o.Size + + if o.BootOrder == &Nilstring { + toSerialize["bootOrder"] = nil + } else if o.BootOrder != nil { + toSerialize["bootOrder"] = o.BootOrder } - if o.AvailabilityZone != nil { - toSerialize["availabilityZone"] = o.AvailabilityZone + if o.BootServer != nil { + toSerialize["bootServer"] = o.BootServer + } + + if o.Bus != nil { + toSerialize["bus"] = o.Bus + } + + if o.CpuHotPlug != nil { + toSerialize["cpuHotPlug"] = o.CpuHotPlug + } + + if o.DeviceNumber != nil { + toSerialize["deviceNumber"] = o.DeviceNumber + } + + if o.DiscVirtioHotPlug != nil { + toSerialize["discVirtioHotPlug"] = o.DiscVirtioHotPlug } + + if o.DiscVirtioHotUnplug != nil { + toSerialize["discVirtioHotUnplug"] = o.DiscVirtioHotUnplug + } + if o.Image != nil { toSerialize["image"] = o.Image } - if o.ImagePassword != nil { - toSerialize["imagePassword"] = o.ImagePassword - } + if o.ImageAlias != nil { toSerialize["imageAlias"] = o.ImageAlias } - if o.SshKeys != nil { - toSerialize["sshKeys"] = o.SshKeys - } - if o.Bus != nil { - toSerialize["bus"] = o.Bus + + if o.ImagePassword != nil { + toSerialize["imagePassword"] = o.ImagePassword } + if o.LicenceType != nil { toSerialize["licenceType"] = o.LicenceType } - if o.CpuHotPlug != nil { - toSerialize["cpuHotPlug"] = o.CpuHotPlug - } - if o.RamHotPlug != nil { - toSerialize["ramHotPlug"] = o.RamHotPlug + + if o.Name != nil { + toSerialize["name"] = o.Name } + if o.NicHotPlug != nil { toSerialize["nicHotPlug"] = o.NicHotPlug } + if o.NicHotUnplug != nil { toSerialize["nicHotUnplug"] = o.NicHotUnplug } - if o.DiscVirtioHotPlug != nil { - toSerialize["discVirtioHotPlug"] = o.DiscVirtioHotPlug + + if o.PciSlot != nil { + toSerialize["pciSlot"] = o.PciSlot } - if o.DiscVirtioHotUnplug != nil { - toSerialize["discVirtioHotUnplug"] = o.DiscVirtioHotUnplug + + if o.RamHotPlug != nil { + toSerialize["ramHotPlug"] = o.RamHotPlug } - if o.DeviceNumber != nil { - toSerialize["deviceNumber"] = o.DeviceNumber + + if o.Size != nil { + toSerialize["size"] = o.Size } - if o.PciSlot != nil { - toSerialize["pciSlot"] = o.PciSlot + + if o.SshKeys != nil { + toSerialize["sshKeys"] = o.SshKeys } - if o.BackupunitId != nil { - toSerialize["backupunitId"] = o.BackupunitId + + if o.Type != nil { + toSerialize["type"] = o.Type } + if o.UserData != nil { toSerialize["userData"] = o.UserData } - if o.BootServer != nil { - toSerialize["bootServer"] = o.BootServer - } - toSerialize["bootOrder"] = o.BootOrder + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volumes.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volumes.go index dfe753d8e..ec795374f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volumes.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volumes.go @@ -16,19 +16,19 @@ import ( // Volumes struct for Volumes type Volumes struct { - // The resource's unique identifier. 
- Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` + Links *PaginationLinks `json:"_links,omitempty"` // URL to the object representation (absolute path). Href *string `json:"href,omitempty"` + // The resource's unique identifier. + Id *string `json:"id,omitempty"` // Array of items in the collection. Items *[]Volume `json:"items,omitempty"` + // The limit (if specified in the request). + Limit *float32 `json:"limit,omitempty"` // The offset (if specified in the request). Offset *float32 `json:"offset,omitempty"` - // The limit (if specified in the request). - Limit *float32 `json:"limit,omitempty"` - Links *PaginationLinks `json:"_links,omitempty"` + // The type of object that has been created. + Type *Type `json:"type,omitempty"` } // NewVolumes instantiates a new Volumes object @@ -49,114 +49,114 @@ func NewVolumesWithDefaults() *Volumes { return &this } -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Volumes) GetId() *string { +// GetLinks returns the Links field value +// If the value is explicit nil, nil is returned +func (o *Volumes) GetLinks() *PaginationLinks { if o == nil { return nil } - return o.Id + return o.Links } -// GetIdOk returns a tuple with the Id field value +// GetLinksOk returns a tuple with the Links field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volumes) GetIdOk() (*string, bool) { +func (o *Volumes) GetLinksOk() (*PaginationLinks, bool) { if o == nil { return nil, false } - return o.Id, true + return o.Links, true } -// SetId sets field value -func (o *Volumes) SetId(v string) { +// SetLinks sets field value +func (o *Volumes) SetLinks(v PaginationLinks) { - o.Id = &v + o.Links = &v } -// HasId returns a boolean if a field has been set. -func (o *Volumes) HasId() bool { - if o != nil && o.Id != nil { +// HasLinks returns a boolean if a field has been set. +func (o *Volumes) HasLinks() bool { + if o != nil && o.Links != nil { return true } return false } -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *Volumes) GetType() *Type { +// GetHref returns the Href field value +// If the value is explicit nil, nil is returned +func (o *Volumes) GetHref() *string { if o == nil { return nil } - return o.Type + return o.Href } -// GetTypeOk returns a tuple with the Type field value +// GetHrefOk returns a tuple with the Href field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volumes) GetTypeOk() (*Type, bool) { +func (o *Volumes) GetHrefOk() (*string, bool) { if o == nil { return nil, false } - return o.Type, true + return o.Href, true } -// SetType sets field value -func (o *Volumes) SetType(v Type) { +// SetHref sets field value +func (o *Volumes) SetHref(v string) { - o.Type = &v + o.Href = &v } -// HasType returns a boolean if a field has been set. -func (o *Volumes) HasType() bool { - if o != nil && o.Type != nil { +// HasHref returns a boolean if a field has been set. 
+func (o *Volumes) HasHref() bool { + if o != nil && o.Href != nil { return true } return false } -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *Volumes) GetHref() *string { +// GetId returns the Id field value +// If the value is explicit nil, nil is returned +func (o *Volumes) GetId() *string { if o == nil { return nil } - return o.Href + return o.Id } -// GetHrefOk returns a tuple with the Href field value +// GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volumes) GetHrefOk() (*string, bool) { +func (o *Volumes) GetIdOk() (*string, bool) { if o == nil { return nil, false } - return o.Href, true + return o.Id, true } -// SetHref sets field value -func (o *Volumes) SetHref(v string) { +// SetId sets field value +func (o *Volumes) SetId(v string) { - o.Href = &v + o.Id = &v } -// HasHref returns a boolean if a field has been set. -func (o *Volumes) HasHref() bool { - if o != nil && o.Href != nil { +// HasId returns a boolean if a field has been set. +func (o *Volumes) HasId() bool { + if o != nil && o.Id != nil { return true } @@ -164,7 +164,7 @@ func (o *Volumes) HasHref() bool { } // GetItems returns the Items field value -// If the value is explicit nil, the zero value for []Volume will be returned +// If the value is explicit nil, nil is returned func (o *Volumes) GetItems() *[]Volume { if o == nil { return nil @@ -201,114 +201,114 @@ func (o *Volumes) HasItems() bool { return false } -// GetOffset returns the Offset field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Volumes) GetOffset() *float32 { +// GetLimit returns the Limit field value +// If the value is explicit nil, nil is returned +func (o *Volumes) GetLimit() *float32 { if o == nil { return nil } - return o.Offset + return o.Limit } -// GetOffsetOk returns a tuple with the Offset field value +// GetLimitOk returns a tuple with the Limit field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volumes) GetOffsetOk() (*float32, bool) { +func (o *Volumes) GetLimitOk() (*float32, bool) { if o == nil { return nil, false } - return o.Offset, true + return o.Limit, true } -// SetOffset sets field value -func (o *Volumes) SetOffset(v float32) { +// SetLimit sets field value +func (o *Volumes) SetLimit(v float32) { - o.Offset = &v + o.Limit = &v } -// HasOffset returns a boolean if a field has been set. -func (o *Volumes) HasOffset() bool { - if o != nil && o.Offset != nil { +// HasLimit returns a boolean if a field has been set. +func (o *Volumes) HasLimit() bool { + if o != nil && o.Limit != nil { return true } return false } -// GetLimit returns the Limit field value -// If the value is explicit nil, the zero value for float32 will be returned -func (o *Volumes) GetLimit() *float32 { +// GetOffset returns the Offset field value +// If the value is explicit nil, nil is returned +func (o *Volumes) GetOffset() *float32 { if o == nil { return nil } - return o.Limit + return o.Offset } -// GetLimitOk returns a tuple with the Limit field value +// GetOffsetOk returns a tuple with the Offset field value // and a boolean to check if the value has been set. 
// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volumes) GetLimitOk() (*float32, bool) { +func (o *Volumes) GetOffsetOk() (*float32, bool) { if o == nil { return nil, false } - return o.Limit, true + return o.Offset, true } -// SetLimit sets field value -func (o *Volumes) SetLimit(v float32) { +// SetOffset sets field value +func (o *Volumes) SetOffset(v float32) { - o.Limit = &v + o.Offset = &v } -// HasLimit returns a boolean if a field has been set. -func (o *Volumes) HasLimit() bool { - if o != nil && o.Limit != nil { +// HasOffset returns a boolean if a field has been set. +func (o *Volumes) HasOffset() bool { + if o != nil && o.Offset != nil { return true } return false } -// GetLinks returns the Links field value -// If the value is explicit nil, the zero value for PaginationLinks will be returned -func (o *Volumes) GetLinks() *PaginationLinks { +// GetType returns the Type field value +// If the value is explicit nil, nil is returned +func (o *Volumes) GetType() *Type { if o == nil { return nil } - return o.Links + return o.Type } -// GetLinksOk returns a tuple with the Links field value +// GetTypeOk returns a tuple with the Type field value // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Volumes) GetLinksOk() (*PaginationLinks, bool) { +func (o *Volumes) GetTypeOk() (*Type, bool) { if o == nil { return nil, false } - return o.Links, true + return o.Type, true } -// SetLinks sets field value -func (o *Volumes) SetLinks(v PaginationLinks) { +// SetType sets field value +func (o *Volumes) SetType(v Type) { - o.Links = &v + o.Type = &v } -// HasLinks returns a boolean if a field has been set. -func (o *Volumes) HasLinks() bool { - if o != nil && o.Links != nil { +// HasType returns a boolean if a field has been set. +func (o *Volumes) HasType() bool { + if o != nil && o.Type != nil { return true } @@ -317,27 +317,34 @@ func (o *Volumes) HasLinks() bool { func (o Volumes) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type + if o.Links != nil { + toSerialize["_links"] = o.Links } + if o.Href != nil { toSerialize["href"] = o.Href } + + if o.Id != nil { + toSerialize["id"] = o.Id + } + if o.Items != nil { toSerialize["items"] = o.Items } - if o.Offset != nil { - toSerialize["offset"] = o.Offset - } + if o.Limit != nil { toSerialize["limit"] = o.Limit } - if o.Links != nil { - toSerialize["_links"] = o.Links + + if o.Offset != nil { + toSerialize["offset"] = o.Offset } + + if o.Type != nil { + toSerialize["type"] = o.Type + } + return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/utils.go b/vendor/github.com/ionos-cloud/sdk-go/v6/utils.go index ee096c731..08c0967d4 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/utils.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/utils.go @@ -17,6 +17,14 @@ import ( "time" ) +var ( + // used to set a nullable field to nil. This is a sentinel address that will be checked in the MarshalJson function. + // if set to this address, a nil value will be marshalled + Nilstring string = "<>" + Nilint32 int32 = -334455 + Nilbool bool = false +) + // ToPtr - returns a pointer to the given value. 
func ToPtr[T any](v T) *T { return &v diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 000000000..402433593 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 000000000..d31b37815 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 000000000..7a008a4d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,141 @@ +# This is an example goreleaser.yaml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + - go install mvdan.cc/garble@v0.9.3 + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 000000000..87d557477 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. 
All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 000000000..4002a16a6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,642 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. 
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. + +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). 
https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. 
https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more 
common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. 
+
+* Mar 11, 2022 (v1.15.1)
+	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+	* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Asynchronous stream decompression is now also faster, since the goroutine allocation splits the workload much more effectively. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+
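+As a quick, hedged illustration of the "concurrency set to 1" behaviour described above (this sketch is not part of the upstream release notes; it assumes the `WithEncoderConcurrency`/`WithDecoderConcurrency` options documented in the `zstd` package), a fully synchronous round trip could look like this:
+
+```
+package main
+
+import (
+	"bytes"
+	"io"
+	"log"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+func main() {
+	payload := bytes.Repeat([]byte("synchronous zstd example "), 50)
+
+	// Encoder with concurrency 1: no background goroutines are spawned.
+	var compressed bytes.Buffer
+	enc, err := zstd.NewWriter(&compressed, zstd.WithEncoderConcurrency(1))
+	if err != nil {
+		log.Fatal(err)
+	}
+	if _, err := enc.Write(payload); err != nil {
+		log.Fatal(err)
+	}
+	if err := enc.Close(); err != nil {
+		log.Fatal(err)
+	}
+	compressedSize := compressed.Len()
+
+	// Decoder with concurrency 1: decoding happens on the calling goroutine.
+	dec, err := zstd.NewReader(&compressed, zstd.WithDecoderConcurrency(1))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer dec.Close()
+
+	out, err := io.ReadAll(dec)
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("round trip ok: %v (%d -> %d bytes)", bytes.Equal(payload, out), len(payload), compressedSize)
+}
+```
+
+With both options set to 1 the work stays on the calling goroutine, which is what makes pooling encoders and decoders straightforward.
+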
      + See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
      + +
      + See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
      + + +
      + See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
      + +
      + See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
      + +
      + See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
      + +
      + See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
      + +
      + See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
      + +# deflate usage + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +| old import | new import | Documentation +|--------------------|-----------------------------------------|--------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) + +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. +If you expect to have a lot of concurrently allocated Writers consider using +the stateless compress described below. + +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. +It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, +but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +``` + // replace 'ioutil.Discard' with your output. + gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) + if err != nil { + return err + } + defer gzw.Close() + + w := bufio.NewWriterSize(gzw, 4096) + defer w.Flush() + + // Write to 'w' +``` + +This will only use up to 4KB in memory when the writer is idle. + +Compression is almost always worse than the fastest compression level +and each write will allocate (a little) memory. 
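+
+Tying the drop-in replacement table under "deflate usage" above to actual code, here is a minimal, hedged sketch: nothing below is specific to this package, it is simply the ordinary `gzip` API with only the import path swapped.
+
+```
+package main
+
+import (
+	"bytes"
+	"io"
+	"log"
+
+	"github.com/klauspost/compress/gzip" // was: "compress/gzip"
+)
+
+func main() {
+	var buf bytes.Buffer
+
+	// Compress exactly as with the standard library.
+	zw := gzip.NewWriter(&buf)
+	if _, err := zw.Write([]byte("hello, hello, hello")); err != nil {
+		log.Fatal(err)
+	}
+	if err := zw.Close(); err != nil {
+		log.Fatal(err)
+	}
+
+	// Decompress with the same familiar API.
+	zr, err := gzip.NewReader(&buf)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer zr.Close()
+
+	out, err := io.ReadAll(zr)
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("round-tripped %d bytes", len(out))
+}
+```
+
+Reverting to the standard library only requires changing the import line back.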
+
+# Performance Update 2018
+
+It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off and "9" is "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since levels 1 and 9 are close to being the same code, they are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1).
So in this case you trade a 30% size increase for a 4 times speedup. + +For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). + +This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. + +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. +* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. +* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md new file mode 100644 index 000000000..ca6685e2b --- /dev/null +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Vulnerability Definition + +A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. + +Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. + +Out-of-memory crashes only applies if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. + +It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. + +Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 000000000..ea5a692d5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. 
+// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. +// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 000000000..ea7324da6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. 
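+
+A short, illustrative sketch of that call pattern (the sample input and error handling below are examples only, not prescribed usage; the error values are listed in the table that follows):
+
+```
+package main
+
+import (
+	"bytes"
+	"log"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	input := bytes.Repeat([]byte("entropy coding likes skewed byte distributions "), 100)
+
+	var s fse.Scratch // re-usable between calls to reduce allocations
+	comp, err := fse.Compress(input, &s)
+	switch err {
+	case nil:
+		// Compressed block is in comp.
+	case fse.ErrIncompressible:
+		log.Println("input judged incompressible; store it uncompressed")
+		return
+	case fse.ErrUseRLE:
+		log.Println("input is a single repeated byte; RLE is a better fit")
+		return
+	default:
+		log.Fatal(err)
+	}
+
+	// The same Scratch re-uses its output buffer for both directions, so
+	// detach it before decompressing or comp would be overwritten
+	// (see the note on Scratch re-use below).
+	s.Out = nil
+
+	decomp, err := fse.Decompress(comp, &s)
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("%d -> %d -> %d bytes, match=%v",
+		len(input), len(comp), len(decomp), bytes.Equal(input, decomp))
+}
+```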
+ +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 000000000..f65eb3909 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. 
+ +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 000000000..43e463611 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. 
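+// Bits accumulate in bitContainer and are moved to out in whole bytes by the flush functions below.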
+type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. 
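+// close calls this after writing the end-of-stream marker bit, so the output always ends on a whole byte.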
+func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 000000000..abade2d60 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 000000000..dac97e58a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,682 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. 
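+	// Marking clearCount makes the next prepare call zero the histogram before it is reused.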
+ s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. 
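+		// With actualTableLog <= 8 each encode writes at most 8 bits, so four encodes fit in the space flush32 guarantees.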
+ for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. +func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
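+		// largeLimit is half the table size, i.e. a probability above 50%, which is when a state transition can emit zero bits.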
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. 
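+// Counts at or below the low threshold are marked -1 (a minimal share), and whatever rounding leaves over
+// is handed to the most frequent symbol; normalizeCount2 covers the corner cases where that would distort it too much.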
+func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
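+// It checks that the absolute normalized counts sum to exactly 1 << actualTableLog and prints the table on mismatch;
+// Compress only calls it behind a constant false, so it is effectively a debugging aid.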
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
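+// Existing slices are re-sliced when their capacity already suffices, so repeated use of one Scratch avoids fresh allocations.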
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
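+				// (off is a uint8 advancing 4 symbols per iteration, so it wraps to zero after every 256 decoded bytes.)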
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 000000000..535cbadfd --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
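+// The step, tableSize/2 + tableSize/8 + 3, is odd and therefore co-prime with the power-of-two table size,
+// so repeatedly adding it modulo the table visits every slot exactly once when spreading symbols.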
+func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh new file mode 100644 index 000000000..aff942205 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gen.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +cd s2/cmd/_s2sx/ || exit 1 +go generate . diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore new file mode 100644 index 000000000..b3d262958 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/.gitignore @@ -0,0 +1 @@ +/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md new file mode 100644 index 000000000..8b6e5c663 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -0,0 +1,89 @@ +# Huff0 entropy compression + +This package provides Huff0 encoding and decoding as used in zstd. + +[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), +a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU +(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) + +## News + +This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. + +This ensures that most functionality is well tested. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and +[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | +| `(error)` | An internal error occurred. | + + +As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. 
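+
+Purely as a sketch (the `encodeBlock` helper and its fallback policy are made up, not part of the package), a call site using `Compress4X` with a re-used `Scratch` might look like this:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+// encodeBlock Huffman codes one block and stores it raw when huff0 reports
+// it cannot win. reUsed reports whether the previous table was kept; the
+// caller must record this, since the block itself does not say whether it
+// carries a table.
+func encodeBlock(in []byte, s *huff0.Scratch) (out []byte, reUsed bool, err error) {
+	out, reUsed, err = huff0.Compress4X(in, s)
+	switch err {
+	case nil:
+		return out, reUsed, nil
+	case huff0.ErrIncompressible, huff0.ErrUseRLE, huff0.ErrTooBig:
+		return in, false, nil
+	default:
+		return nil, false, err
+	}
+}
+
+func main() {
+	s := &huff0.Scratch{Reuse: huff0.ReusePolicyAllow}
+	out, reUsed, err := encodeBlock(bytes.Repeat([]byte("abcdefgh"), 512), s)
+	fmt.Println(len(out), reUsed, err)
+}
+```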
+ +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. + +## Tables and re-use + +Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. + +The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) +that controls this behaviour. See the documentation for details. This can be altered between each block. + +Do however note that this information is *not* stored in the output block and it is up to the users of the package to +record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, +based on the boolean reported back from the CompressXX call. + +If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the +[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. + +## Decompressing + +The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). +This will initialize the decoding tables. +You can supply the complete block to `ReadTable` and it will return the data part of the block +which can be given to the decompressor. + +Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) +or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. + +For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go new file mode 100644 index 000000000..e36d9742f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -0,0 +1,229 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. 
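+// bitReaderBytes keeps its bits left justified in value, so peekByteFast can take the next 8 bits straight from the top of the register.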
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
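+// The highest set bit of the last byte is the end-of-stream marker written by the encoder; init skips that bit and the zero padding above it.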
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 000000000..b4d7164e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,103 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. 
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 000000000..4dcab8d23 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,44 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + v3 := int32(b.b[b.off+3]) + v2 := int32(b.b[b.off+2]) + v1 := int32(b.b[b.off+1]) + v0 := int32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + v3 := uint32(b.b[b.off+3]) + v2 := uint32(b.b[b.off+2]) + v1 := uint32(b.b[b.off+1]) + v0 := uint32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 000000000..4ee4fa18d --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,749 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. 
+ return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. + s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. 
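The reuse logic above only accepts a result when it beats wantSize: with WantLogLess set, the output must be at least 1/2^WantLogLess smaller than the input. A restatement of that threshold (illustrative; mirrors the arithmetic used by compress and EstimateSizes):

    package main

    import "fmt"

    // wantSize mirrors the acceptance threshold in compress(): with
    // wantLogLess = n, the compressed block must come in below
    // inputLen - inputLen>>n to be kept.
    func wantSize(inputLen int, wantLogLess uint8) int {
        want := inputLen
        if wantLogLess > 0 {
            want -= want >> wantLogLess
        }
        return want
    }

    func main() {
        // For a 4096-byte block and WantLogLess=4 the output must be
        // smaller than 4096 - 256 = 3840 bytes.
        fmt.Println(wantSize(4096, 4))
    }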
+ err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
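compress4X above reserves six bytes for a jump table holding the little-endian lengths of the first three streams; the fourth length is implied by the remainder. A sketch of how the decoder side splits such a block back into four streams (hypothetical helper, not part of the package; real decoding also needs the Huffman table):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // splitStreams parses the 6-byte jump table and returns the four
    // concatenated streams that follow it.
    func splitStreams(block []byte) ([4][]byte, error) {
        var streams [4][]byte
        if len(block) < 6 {
            return streams, fmt.Errorf("block too small")
        }
        payload := block[6:]
        for i := 0; i < 3; i++ {
            n := int(binary.LittleEndian.Uint16(block[i*2:]))
            if n > len(payload) {
                return streams, fmt.Errorf("truncated stream %d", i)
            }
            streams[i], payload = payload[:n], payload[n:]
        }
        streams[3] = payload // last length is implied
        return streams, nil
    }

    func main() {
        // A fabricated block with stream lengths 2, 3, 1 and a 2-byte last stream.
        block := []byte{2, 0, 3, 0, 1, 0, 0xAA, 0xBB, 1, 2, 3, 0xCC, 0xDD, 0xEE}
        s, err := splitStreams(block)
        fmt.Println(s, err)
    }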
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
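countSimple above is a single pass that counts every byte value and records the largest count plus the number of distinct symbols; minTableLog then derives a lower bound on the table log from those figures. A small sketch of both (assumes at least two distinct symbols, and that highBit32 is the index of the highest set bit):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // histogram mirrors countSimple: count each byte value and track the
    // largest count and how many symbols are actually used.
    func histogram(in []byte) (count [256]uint32, maxCount uint32, symbols int) {
        for _, b := range in {
            count[b]++
        }
        for _, c := range count {
            if c > maxCount {
                maxCount = c
            }
            if c > 0 {
                symbols++
            }
        }
        return
    }

    func main() {
        count, maxCount, symbols := histogram([]byte("abracadabra"))
        // Lower bound from the symbol count alone, as in minTableLog:
        // highBit32(symbols-1) + 2, where highBit32(x) == bits.Len32(x) - 1.
        minBitsSymbols := bits.Len32(uint32(symbols-1)) - 1 + 2
        fmt.Println(count['a'], maxCount, symbols, minBitsSymbols) // 5 5 5 4
    }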
+ huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count() == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].setCount(1 << 30) + } + // fake entry, strong barrier + huffNode0[0].setCount(1 << 31) + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].setNbBits(0) + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits()]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol()].nBits = v.nbBits() + } + + // assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. 
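buildCTable above turns the node tree into a canonical code: bit lengths are counted per rank, starting values are derived longest rank first (halving as the lengths shorten), and codes are then handed out in symbol order. A standalone restatement of that final step (illustrative; assumes the lengths already form a complete prefix code):

    package main

    import "fmt"

    // assignCanonical mirrors the tail of buildCTable: derive the starting
    // value per code length, then hand out values in symbol order.
    func assignCanonical(lengths []uint8) []uint16 {
        var nbPerRank [16]uint16
        maxBits := uint8(0)
        for _, l := range lengths {
            nbPerRank[l]++
            if l > maxBits {
                maxBits = l
            }
        }
        var valPerRank [16]uint16
        min := uint16(0)
        for n := maxBits; n > 0; n-- {
            valPerRank[n] = min
            min += nbPerRank[n]
            min >>= 1
        }
        vals := make([]uint16, len(lengths))
        for i, l := range lengths {
            vals[i] = valPerRank[l]
            valPerRank[l]++
        }
        return vals
    }

    func main() {
        // Lengths for symbols A,B,C,D: the valid prefix code {1,2,3,3}.
        fmt.Println(assignCanonical([]uint8{1, 2, 3, 3})) // [1 1 0 1]
    }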
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 000000000..54bd08b25 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
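The nodeElt type defined at the end of compress.go above squashes count, parent, symbol and nbBits into a single uint64 so that huffSort and the tree construction move nodes with whole-word loads and stores. A minimal restatement of the layout (illustrative sketch; the ReadTable body continues below):

    package main

    import "fmt"

    // packedNode mirrors nodeElt: count in bits 0-31, parent in 32-47,
    // symbol in 48-55, nbBits in 56-63.
    type packedNode uint64

    func makeNode(count uint32, symbol byte) packedNode {
        return packedNode(count) | packedNode(symbol)<<48
    }

    func (n packedNode) count() uint32 { return uint32(n) }
    func (n packedNode) symbol() byte  { return byte(n >> 48) }
    func (n packedNode) nbBits() uint8 { return uint8(n >> 56) }

    func (n *packedNode) setNbBits(b uint8) {
        *n = *n&0x00ffffffffffffff | packedNode(b)<<56
    }

    func main() {
        n := makeNode(1000, 'e')
        n.setNbBits(5)
        fmt.Println(n.count(), string(n.symbol()), n.nbBits()) // 1000 e 5
    }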
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
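ReadTable above builds dEntrySingle values whose low byte is the number of bits the code occupies and whose high byte is the decoded symbol, and that is exactly what the decompress1X8Bit loop consumes. A toy decode step using the same packing (illustrative only; the real decoder peeks the top bits of a 64-bit container and reads the stream back to front, while this sketch reads a single byte MSB first):

    package main

    import "fmt"

    // entry packs one decode step: low byte = code length, high byte = symbol.
    type entry uint16

    func mkEntry(nBits uint8, symbol byte) entry { return entry(nBits) | entry(symbol)<<8 }

    func main() {
        // Toy 2-bit lookup: index = next 2 bits of the stream.
        table := [4]entry{
            mkEntry(1, 'a'), mkEntry(1, 'a'), // code 0x: 'a' uses 1 bit
            mkEntry(2, 'b'),                  // code 10: 'b'
            mkEntry(2, 'c'),                  // code 11: 'c'
        }
        bitstream := uint8(0b10_0_11_0_0_0) // decodes to b, a, c, a, ...
        var out []byte
        bitsRead := uint8(0)
        for bitsRead+2 <= 8 {
            idx := (bitstream << bitsRead) >> 6 // peek the top 2 unread bits
            e := table[idx]
            out = append(out, byte(e>>8)) // symbol from the high byte
            bitsRead += uint8(e)          // advance by the code length
        }
        fmt.Println(string(out)) // bacaa
    }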
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
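The 4X decoders write stream i into out[i*dstEvery:], with dstEvery = (dstSize+3)/4, which is why the per-stream buffers above can be copied straight into their regions and why the last stream may be short. A sketch of that layout arithmetic:

    package main

    import "fmt"

    // streamRegions returns the [start, end) output range of each of the
    // four streams for a given uncompressed size.
    func streamRegions(dstSize int) [4][2]int {
        dstEvery := (dstSize + 3) / 4
        var r [4][2]int
        for i := 0; i < 4; i++ {
            start := i * dstEvery
            end := start + dstEvery
            if end > dstSize {
                end = dstSize // the last stream may be short
            }
            r[i] = [2]int{start, end}
        }
        return r
    }

    func main() {
        fmt.Println(streamRegions(10)) // [[0 3] [3 6] [6 9] [9 10]]
    }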
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[br.peekByteFast()].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// matches will compare a decoding table to a coding table. +// Errors are written to the writer. +// Nothing will be written if table is ok. +func (s *Scratch) matches(ct cTable, w io.Writer) { + if s == nil || len(s.dt.single) == 0 { + return + } + dt := s.dt.single[:1<>8) == byte(sym) { + fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) + errs++ + break + } + } + if errs == 0 { + broken-- + } + continue + } + // Unused bits in input + ub := tablelog - enc.nBits + top := enc.val << ub + // decoder looks at top bits. + dec := dt[top] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 0 { + fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + continue + } + // Ensure that all combinations are covered. + for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errros, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 000000000..ba7e8e6b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,226 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of thir main loops. 
+package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
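Decompress4X above falls back to the 8-bit Go implementation for small outputs and otherwise drives the assembler main loop through decompress4xContext. For orientation, a plausible end-to-end round trip through the public API, as a sketch: it assumes the compressed block starts with the serialized table, which is the case when no table was re-used, and that the capacity of dst carries the exact uncompressed size as documented above.

    package main

    import (
        "bytes"
        "fmt"

        "github.com/klauspost/compress/huff0"
    )

    func main() {
        in := bytes.Repeat([]byte("four interleaved streams decode independently. "), 100)

        var c huff0.Scratch
        comp, _, err := huff0.Compress4X(in, &c)
        if err != nil {
            panic(err) // ErrIncompressible / ErrUseRLE are possible for other inputs
        }

        // Read the table back, then hand the remaining bytes to the
        // stateless Decoder.
        s, remain, err := huff0.ReadTable(comp, nil)
        if err != nil {
            panic(err)
        }
        dst := make([]byte, 0, len(in)) // capacity = expected uncompressed size
        out, err := s.Decoder().Decompress4X(dst, remain)
        if err != nil {
            panic(err)
        }
        fmt.Println(bytes.Equal(out, in)) // true
    }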
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 000000000..c4c7ab2d1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + 
MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + 
+ // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + 
SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), 
CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 000000000..908c17de6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 000000000..e8ad17ad0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
+package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. + MaxDecodedSize int + + br byteReader + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. 
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 000000000..3954c5121 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 000000000..e802579c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 000000000..4465fbe9e --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 000000000..6050c10f4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
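The CPUID stub above populates the two package-level booleans that HasBMI1/HasBMI2 expose, so that callers can choose between the plain assembly loops and the ones marked "Requires: BMI2" earlier in this patch. As a minimal sketch of that feature-gating pattern, using the standalone golang.org/x/sys/cpu package instead of the internal cpuinfo package (the print statements are illustrative only):

    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func main() {
        // Same capability the CPUID stub above reports: leaf 7, ECX=0,
        // EBX bit 3 = BMI1 and EBX bit 8 = BMI2.
        if cpu.X86.HasBMI2 {
            fmt.Println("BMI2 present: the SHLX/SHRX-based decode loops can be selected")
        } else {
            fmt.Println("no BMI2: fall back to the generic CL-shift loops")
        }
    }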
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 000000000..40796a49d --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 000000000..77395a6b8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. 
Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 000000000..13c6040a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. 
This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. 
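As a quick sanity check of the worst-case bound derived in MaxEncodedLen above (a standalone arithmetic sketch, not part of the library): evaluating 32 + n + n/6 at the 64 KiB block-size limit gives exactly the maxEncodedLenOfMaxBlockSize constant defined in snappy.go later in this patch.

    package main

    import "fmt"

    func main() {
        // Worst-case bound from MaxEncodedLen: 32 + n + n/6 (integer division).
        n := uint64(65536)        // maxBlockSize
        fmt.Println(32 + n + n/6) // prints 76490, i.e. MaxEncodedLen(maxBlockSize)
    }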
+func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
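Because snapref is an internal package it cannot be imported directly; the round trip below is a hedged usage sketch of the stream (framing) format handled by the Reader and Writer types above, written against the equivalent public API in github.com/golang/snappy, from which these files derive.

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/golang/snappy"
    )

    func main() {
        var buf bytes.Buffer

        // Writer side: the buffered writer emits the stream identifier chunk,
        // then compressed or uncompressed data chunks with masked CRCs.
        w := snappy.NewBufferedWriter(&buf)
        if _, err := w.Write([]byte("hello, snappy framing format")); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil { // flushes the final chunk
            panic(err)
        }

        // Reader side: verifies chunk checksums and yields the original bytes.
        out, err := io.ReadAll(snappy.NewReader(&buf))
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }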
+func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 000000000..2aa6a95a0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. 
+func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 000000000..34d01f4aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. 
+ +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 000000000..2263853fc --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.16 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 000000000..bdd49c8b2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. 
+
+This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
+
+This package is pure Go and without use of "unsafe".
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
+
+## Compressor
+
+### Status:
+
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively
+used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
+
+For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
+
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
+The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
+
+### Usage
+
+An Encoder can be used either for compressing a stream, via the
+`io.WriteCloser` interface supported by the Encoder, or for multiple independent
+tasks, via the `EncodeAll` function.
+For smaller encodes, using the `EncodeAll` function is encouraged.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`,
+which will compress input as each block is completed, blocking on writes until each has completed.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option.
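+
+As a minimal sketch (the `CompressBetter` helper is illustrative, not part of the package; the level is one of the
+predefined `SpeedFastest`, `SpeedDefault`, `SpeedBetterCompression` and `SpeedBestCompression` values):
+
+```Go
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// CompressBetter compresses in to out using the "better" preset
+// (roughly equivalent to zstd level 7).
+func CompressBetter(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
+	if err != nil {
+		return err
+	}
+	if _, err = io.Copy(enc, in); err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```
+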
+Currently only pre-defined compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks, you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some speed examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", and level 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(in) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. 
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will only decompress data
+as it is being requested.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+A dictionary will be used automatically for data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used,
+and it will likely be used even if it doesn't improve compression.
+
+The used dictionary must be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than using no dictionary.
+Use the [zstd command-line tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time, so be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use `Reset(r io.Reader) error` to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used, as in the sketch below.
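+
+A minimal sketch, assuming a known upper bound on the decompressed size (the `DecompressInto` helper and
+the `expectedSize` parameter are illustrative, not part of the package):
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// A single stored decoder, shared by many buffers.
+var decoder, _ = zstd.NewReader(nil)
+
+// DecompressInto decodes src, appending to a destination pre-sized to expectedSize
+// so the decoder does not have to grow the buffer.
+func DecompressInto(src []byte, expectedSize int) ([]byte, error) {
+	return decoder.DecodeAll(src, make([]byte, 0, expectedSize))
+}
+```
+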
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder will create goroutines that: + +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +The concurrency level will, for streams, determine how many blocks ahead the compression will start. + +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + +### Benchmarks + +The first two are streaming decodes and the last are smaller inputs. + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + +``` +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. 
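+
+A rough sketch of per-archive registration, assuming the `ZipCompressor`, `ZipDecompressor` and
+`ZipMethodWinZip` helpers exported by this package (see the linked example below for canonical usage;
+the `writeZstdEntry` helper is illustrative):
+
+```Go
+import (
+	"archive/zip"
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// A single compressor, reused for all archives.
+var zstdZipCompressor = zstd.ZipCompressor()
+
+// writeZstdEntry writes one zstd-compressed file entry to a new zip archive.
+func writeZstdEntry(out io.Writer, name string, data []byte) error {
+	zw := zip.NewWriter(out)
+	// Register on this writer only; avoid the global zip registration functions.
+	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstdZipCompressor)
+	w, err := zw.CreateHeader(&zip.FileHeader{Name: name, Method: zstd.ZipMethodWinZip})
+	if err != nil {
+		return err
+	}
+	if _, err := w.Write(data); err != nil {
+		return err
+	}
+	return zw.Close()
+}
+```
+
+On the reading side, registering `zstd.ZipDecompressor()` for the same method ID on a `zip.Reader` plays the corresponding role.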
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 000000000..97299d499 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,140 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 000000000..78b3c61be --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,113 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. 
+// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 000000000..9f17ce601 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,726 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. 
+ WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. + if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. 
+func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err 
+ } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 000000000..fd4a36f73 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,874 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. 
+func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. 
+func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
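To make the repeat-offset handling below concrete: when literals precede the match, an offset equal to one of the three most recent offsets is emitted as repeat code 1-3, and any other offset as offset+3. A simplified standalone sketch of that mapping (it deliberately omits the history rotation the real matchOffset performs):

package main

import "fmt"

// repCode mirrors the lits > 0 branch of matchOffset in simplified form.
func repCode(offset uint32, recent [3]uint32) uint32 {
	switch offset {
	case recent[0]:
		return 1
	case recent[1]:
		return 2
	case recent[2]:
		return 3
	default:
		return offset + 3
	}
}

func main() {
	recent := [3]uint32{1, 4, 8} // initial recent offsets, as set by initNewEncode
	fmt.Println(repCode(4, recent))   // 2: matches the second recent offset
	fmt.Println(repCode(100, recent)) // 103: fresh offset, encoded as offset+3
}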
+ if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... 
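The literal-compression choice above (four Huffman streams for 1 KiB or more of literals, a single stream above 32 bytes, raw otherwise, with RLE and incompressible fallbacks) can be sketched against the public huff0 API as follows. This is a simplified illustration of the thresholds, not the encoder's actual code path; table reuse and RLE emission are omitted:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// compressLits mirrors the size thresholds used by encodeLits above.
func compressLits(lits []byte, s *huff0.Scratch) ([]byte, error) {
	var out []byte
	var err error
	switch {
	case len(lits) >= 1024:
		out, _, err = huff0.Compress4X(lits, s) // four interleaved streams
	case len(lits) > 32:
		out, _, err = huff0.Compress1X(lits, s) // single stream
	default:
		err = huff0.ErrIncompressible // too small to bother
	}
	if err == huff0.ErrIncompressible || err == huff0.ErrUseRLE {
		return lits, nil // fall back to storing the literals raw in this sketch
	}
	return out, err
}

func main() {
	lits := bytes.Repeat([]byte("abcd "), 400) // 2000 repetitive bytes
	out, err := compressLits(lits, &huff0.Scratch{WantLogLess: 4})
	fmt.Println(len(lits), "->", len(out), err)
}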
+ // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... 
+ if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... + for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. 
+ outBits := ofB.outBits & 31 + extraBits := uint64(s.offset & bitMask32[outBits]) + extraBitsN := outBits + + mlB := mlTT[s.mlCode] + //ml.encode(mlB) + nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 + dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) + wr.addBits16NC(ml.state, uint8(nbBitsOut)) + ml.state = ml.stateTable[dstState] + + outBits = mlB.outBits & 31 + extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits]) + extraBitsN += outBits + + llB := llTT[s.llCode] + //ll.encode(llB) + nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16 + dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) + wr.addBits16NC(ll.state, uint8(nbBitsOut)) + ll.state = ll.stateTable[dstState] + + outBits = llB.outBits & 31 + extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits]) + extraBitsN += outBits + + wr.flush32() + wr.addBits64NC(extraBits, extraBitsN) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + ml.flush(mlEnc.actualTableLog) + of.flush(ofEnc.actualTableLog) + ll.flush(llEnc.actualTableLog) + err = wr.close() + if err != nil { + return err + } + b.output = wr.out + + // Maybe even add a bigger margin. + if len(b.output)-3-bhOffset >= b.size { + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() + b.litEnc.Reuse = huff0.ReusePolicyNone + return nil + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debugEncoder { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram() + ofH := b.coders.ofEnc.Histogram() + mlH := b.coders.mlEnc.Histogram() + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i := range b.sequences { + seq := &b.sequences[i] + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 000000000..01a01e486 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 000000000..55a388553 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. 
+ skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 000000000..0e59a242d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
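byteReader above assembles multi-byte values by hand instead of going through encoding/binary; the two forms are equivalent, as this small sketch shows:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12}
	// Manual little-endian assembly, the style used by byteReader.Uint32.
	manual := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
	// Equivalent standard-library call.
	std := binary.LittleEndian.Uint32(b)
	fmt.Printf("0x%08x 0x%08x\n", manual, std) // 0x12345678 0x12345678
}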
+func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 000000000..f6a240970 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,229 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. 
+ // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + *h = Header{} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
+ h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 000000000..f04aaa21e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,948 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. 
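To make the two modes concrete, a small usage sketch (error handling trimmed, payload illustrative); the streaming and stateless paths share one Decoder:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Build a small frame to decode, using the encoder side of the same package.
	enc, _ := zstd.NewWriter(nil)
	frame := enc.EncodeAll([]byte("hello, decoder"), nil)
	enc.Close()

	dec, _ := zstd.NewReader(nil)
	defer dec.Close()

	// Stateless mode: DecodeAll, safe for concurrent use.
	out, err := dec.DecodeAll(frame, nil)
	fmt.Println(string(out), err)

	// Streaming mode: Reset points the same decoder at a new stream.
	if err := dec.Reset(bytes.NewReader(frame)); err != nil {
		fmt.Println("reset:", err)
		return
	}
	streamed, _ := io.ReadAll(dec)
	fmt.Println(string(streamed))
}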
+// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which is will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. 
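Returning to the Header type from decodeheader.go above, a short usage sketch; the frame bytes are a hand-built skippable frame (magic 0x184D2A53 little endian, a 4-byte length field, then 4 bytes of user data) chosen purely for illustration:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	frame := []byte{0x53, 0x2a, 0x4d, 0x18, 0x04, 0x00, 0x00, 0x00, 'd', 'a', 't', 'a'}

	var h zstd.Header
	if err := h.Decode(frame); err != nil {
		fmt.Println("decode header:", err)
		return
	}
	// Skippable frames report their ID and payload length instead of block info.
	fmt.Println(h.Skippable, h.SkippableID, h.SkippableSize) // true 3 4
}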
+ d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 000000000..774c5f00f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. 
+// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. 
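+//
+// A minimal usage sketch (an editor's illustration, not part of the upstream
+// documentation); the spare capacity of the destination slice becomes the
+// effective output limit, and the 1 MiB budget below is only an example value:
+//
+//	dec, err := NewReader(nil, WithDecodeAllCapLimit(true))
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer dec.Close()
+//	dst := make([]byte, 0, 1<<20)               // caller-chosen output budget
+//	out, err := dec.DecodeAll(compressed, dst)  // compressed: the zstd frame to decode
+//	// err is non-nil if the decoded frame would not fit within cap(dst).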
+func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 000000000..ca0951452 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,161 @@ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + //llEnc, ofEnc, mlEnc []*fseEncoder + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
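+ // (The decoded tables belong to the dictionary and are shared by every
+ // frame that references it, so they must never be recycled.)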
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 000000000..5ca46038a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,173 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) 
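+ // s is the offset at which src now starts inside e.hist; the caller uses
+ // it as the base position of this block when searching for matches.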
+ return s +} + +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + e.blk.dictLitEnc = nil + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < e.bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 000000000..9819d4145 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,530 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
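+ // Rough editor's estimate: with 8-byte prevEntry slots, the long and short
+ // tables together occupy (1<<22 + 1<<18) * 8 bytes, about 34 MiB per encoder.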
+ bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = maxMatchLen * 8 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep)) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. 
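+ // Bytes in src[nextEmit:] that end up outside any match are flushed as raw
+ // literals, either via addLiterals or as extraLits after the main loop.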
+ nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return + } + if debugAsserts { + if offset <= 0 { + panic(offset) + } + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if rep < 0 { + // Extend candidate match backwards as far as possible. + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... 
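+ // Store the current position in both tables; the entry being displaced
+ // is kept as 'prev', so every bucket acts as a two-entry chain.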
+ e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + if debugAsserts && s <= nextEmit { + panic("s <= nextEmit") + } + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + index0 := s + 1 + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + // Index skipped... 
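+ // Positions covered by the repeat match can still seed future
+ // matches, so hash every one of them into both tables.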
+ off := index0 + e.cur + for index0 < s { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + index0 := s + 1 + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index old s + 1 -> s - 1 + for index0 < s { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. 
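+// For this encoder the implementation simply pre-sizes the history buffer for
+// src and then reuses the regular Encode path (editor's note).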
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 000000000..8582f31a7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1242 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
+ betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
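+ // litLen records how many of the just-appended bytes are literals that
+ // precede the match this sequence will describe.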
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
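+ // The 8 bytes loaded at s+checkAt already compared equal, so the
+ // length scan can start 8 bytes into both positions.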
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. 
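+ // Entries older than the match window are zeroed; the rest are rebased
+ // so they remain valid once e.cur is reset to e.maxMatchOff below.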
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. 
+ l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 000000000..a154c18f7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1123 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
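+ // Together with kSearchStrength below, each miss advances the position by
+ // stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) bytes, so the search
+ // speeds up across stretches of input where no matches are being found.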
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
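+ // The match above was found at the older recent offset (offset2), so the two
+ // recent offsets trade places to keep offset1 the most recently used one.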
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
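+ // Stopping one byte short of nextEmit guarantees the sequence below always
+ // carries at least one literal.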
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 000000000..f45a3da7d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,891 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
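+ // maxMatchLength caps how far any single match is extended by the encoders below.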
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 000000000..4de0aed0d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,624 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
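+ //
+ // A minimal sketch of typical streaming use, based on the API described above
+ // and in the Write/EncodeAll documentation (error handling elided):
+ //
+ //	enc, _ := NewWriter(dst)          // dst is any io.Writer
+ //	_, _ = enc.Write(uncompressed)    // buffered; blocks are compressed as they fill
+ //	_ = enc.Close()                   // flushes remaining data and the optional CRC
+ //
+ // For small, in-memory payloads, EncodeAll(src, dst) appends a complete
+ // compressed frame to dst without going through the streaming machinery.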
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
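+ // Advance past the bytes just buffered; once s.filling holds a full
+ // block it is handed to nextBlock below for compression.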
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. 
+ enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. 
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + oldout := blk.output + // Output directly to dst + blk.output = dst + + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = blk.output + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = append(dst, blk.output...) + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. 
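+// The bound covers the frame header, the 3 bytes of per-block overhead, the
+// optional checksum and any configured padding, as computed below.
+// One possible use (illustrative) is pre-sizing the destination for EncodeAll:
+//
+//	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
+//	dst = enc.EncodeAll(src, dst)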
+func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 000000000..faaf81921 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,339 @@ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: false, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. +func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. 
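+// The checksum is enabled by default; as an illustrative example, passing
+// WithEncoderCRC(false) to NewWriter produces frames without the trailing
+// 4-byte checksum:
+//
+//	enc, err := NewWriter(w, WithEncoderCRC(false)) // w: caller-supplied io.Writer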
+func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + n = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. 
+ speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 16 << 20 + case SpeedBestCompression: + o.windowSize = 32 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedDefault + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. 
+// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. +func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 000000000..53e160f7e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,413 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. 
+ MaxWindowSize = 1 << 29 +) + +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + case nil: + signature[0] = b[0] + default: + return err + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + case nil: + copy(signature[1:], b) + default: + return err + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. + break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + 
fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. 
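+// It is used instead of checkCRC when checksum verification is disabled
+// (d.o.ignoreChecksum), as runDecoder below illustrates.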
+func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. + crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 000000000..4ef7f5a3e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { + dst = append(dst, frameMagic...) 
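+ // Assemble the Frame_Header_Descriptor byte: bits 0-1 carry the
+ // Dictionary_ID field size, bit 2 the checksum flag, bit 5 the
+ // Single_Segment flag and bits 6-7 the Frame_Content_Size field size
+ // (set further down once fcs is known).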
+ var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are nto stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst, nil +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 000000000..2f8860a72 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,307 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
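+//
+// fse_decoder.go reads FSE (Finite State Entropy) table descriptions from the
+// compressed input and expands them into the decoding tables used for
+// sequence decoding.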
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> (bitCount & 31) + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + bitStream = b.Uint32() >> (bitCount & 31) + } + } + s.symbolLen = charnum + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return s.buildDtable() +} + +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +// Using a composite uint64 is faster than a struct with separate members. +type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) +} + +// decSymbolValue returns the transformed decSymbol for the given symbol. +func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { + if int(symb) >= len(t) { + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + } + lu := t[symb] + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil +} + +// setRLE will set the decoder til RLE mode. 
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 000000000..d04a829b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 000000000..bcde39869 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 000000000..332e51fe4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
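+// It runs in three passes, as the code below shows: low-probability (-1)
+// symbols are placed from the top of the table downwards, the remaining
+// symbols are spread using the tableStep stride, and finally the per-entry
+// bit counts and next-state values are filled in.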
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 000000000..ab26326a8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. +func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 000000000..474cb77d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 000000000..5d73c21eb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 000000000..09164856d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. 
+ // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 000000000..24b53065f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 000000000..777290d44 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. 
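+For example, a minimal sketch of incremental hashing with `Digest` (assuming the
+`github.com/cespare/xxhash/v2` import path mentioned below; errors from `WriteString`
+are always nil and are ignored here):
+
+```
+d := xxhash.New()
+d.WriteString("hello ")
+d.WriteString("world")
+fmt.Printf("%016x\n", d.Sum64())
+```
+
+For one-shot hashing of a byte slice, `Sum64(b)` avoids allocating a `Digest`.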
+ +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 000000000..fc40c8200 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
+ c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 000000000..ddb63aa91 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 000000000..17901e080 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 000000000..d4221edf4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 000000000..0be16cefc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 000000000..6f3b0cb10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 000000000..f41932b7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 000000000..9a7655c0f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 000000000..57b9c31c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 000000000..9405fcf10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,508 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
+ llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. +func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. 
+func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) 
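+		// Advance the literal buffer past the bytes just copied, so the next sequence reads its literals from the correct offset.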
+ s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
+ br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 000000000..8adabd828 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,394 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + "io" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
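adjustOffset above applies zstd's repeated-offset rule: decoded offset values above 3 carry a new offset (the value minus 3), while values 1-3 select one of the three most recently used offsets, shifted by one position when the sequence has no literals, in which case the last slot means "most recent offset minus one". A standalone sketch of that rule as the format describes it, using hypothetical names rather than this package's internal representation:

package main

import "fmt"

// repeatOffsets holds the three most recently used match offsets, most
// recent first.
type repeatOffsets [3]int

// resolve maps a decoded offset value to a match offset and updates the
// history according to the repeat-offset rule sketched above.
func (r *repeatOffsets) resolve(offsetValue, litLen int) int {
	if offsetValue > 3 {
		off := offsetValue - 3
		r[2], r[1], r[0] = r[1], r[0], off
		return off
	}
	idx := offsetValue - 1
	if litLen == 0 {
		idx++ // repeats are shifted by one when there are no literals
	}
	var off int
	if idx == 3 {
		off = r[0] - 1
	} else {
		off = r[idx]
	}
	if off == 0 {
		off = 1 // corrupted input; force a usable offset
	}
	if idx != 0 {
		if idx != 1 {
			r[2] = r[1]
		}
		r[1] = r[0]
		r[0] = off
	}
	return off
}

func main() {
	r := repeatOffsets{1, 4, 8}
	fmt.Println(r.resolve(7, 5)) // 4: a new offset (7 - 3)
	fmt.Println(r.resolve(1, 5)) // 4: repeat of the most recent offset
}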
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorOverread: + return true, io.ErrUnexpectedEOF + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
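decodeSyncSimple above selects one of four assembly bodies at run time: a BMI2 and a baseline amd64 variant, each in a bounds-checked "safe" flavor and a faster one. A minimal sketch of that pick-once dispatch shape, using golang.org/x/sys/cpu in place of the package's internal cpuinfo helper; the kernel functions are hypothetical stand-ins:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// Two interchangeable kernels; the fast one could assume BMI2 instructions.
func decodeGeneric(n int) int { return n + 1 }
func decodeBMI2(n int) int    { return n + 1 }

// decodeKernel is chosen once at startup, so hot loops call it without
// re-checking CPU features on every iteration.
var decodeKernel = func() func(int) int {
	if cpu.X86.HasBMI2 {
		return decodeBMI2
	}
	return decodeGeneric
}()

func main() {
	fmt.Println(decodeKernel(41)) // 42
}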
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// error reported when bits are overread. +const errorOverread = 6 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
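The assembly in this file reaches into decodeAsmContext at hard-coded byte offsets (0, 24 and 48 for the three tables, 72, 80 and 88 for the states, 96 for iteration, 104 for seqs, 128 for litRemain), so the Go struct layout and the generated code have to stay in sync. A small sketch of how such offsets can be cross-checked with unsafe.Offsetof, using a mirror struct for illustration rather than the real type:

package main

import (
	"fmt"
	"unsafe"
)

// mirror repeats the field order and field sizes of decodeAsmContext, so on
// amd64 unsafe.Offsetof reports the byte offsets the assembly hard-codes.
type mirror struct {
	llTable   []uint64 // a slice header is ptr+len+cap = 24 bytes on amd64
	mlTable   []uint64
	ofTable   []uint64
	llState   uint64
	mlState   uint64
	ofState   uint64
	iteration int
	seqs      []struct{ ll, ml, mo int }
	litRemain int
}

func main() {
	var m mirror
	fmt.Println(unsafe.Offsetof(m.mlTable))   // 24
	fmt.Println(unsafe.Offsetof(m.llState))   // 72
	fmt.Println(unsafe.Offsetof(m.iteration)) // 96
	fmt.Println(unsafe.Offsetof(m.seqs))      // 104
	fmt.Println(unsafe.Offsetof(m.litRemain)) // 128
}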
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 000000000..b6f4ba6fc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4175 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
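The fill blocks below (repeated in every decode loop in this file) top up a 64-bit bit buffer that is read from the end of the input towards the start: while enough bytes remain, the consumed whole bytes are replaced with a single 8-byte load; near the start of the buffer, bytes are shifted in one at a time, and a bit count above 64 means the stream was overread. A rough Go sketch of that refill strategy, using a hypothetical toy type rather than this package's bitReader:

package main

import (
	"encoding/binary"
	"fmt"
)

// backwardBits is a toy bit reader over a buffer consumed from the end.
type backwardBits struct {
	in       []byte // unread portion of the input
	value    uint64 // bit buffer
	bitsRead uint8  // bits already consumed from value
}

// fill refills the bit buffer: one 8-byte load while plenty of input remains,
// otherwise single bytes until the buffer is full or the input runs out.
func (b *backwardBits) fill() {
	if b.bitsRead < 8 {
		return
	}
	drop := int(b.bitsRead >> 3) // whole bytes consumed
	if len(b.in)-drop >= 8 {
		b.in = b.in[:len(b.in)-drop]
		b.value = binary.LittleEndian.Uint64(b.in[len(b.in)-8:])
		b.bitsRead &= 7
		return
	}
	for b.bitsRead >= 8 && len(b.in) > 0 {
		b.value = b.value<<8 | uint64(b.in[len(b.in)-1])
		b.in = b.in[:len(b.in)-1]
		b.bitsRead -= 8
	}
}

// overread reports whether more bits were consumed than the input held.
func (b *backwardBits) overread() bool { return b.bitsRead > 64 }

func main() {
	b := &backwardBits{in: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, bitsRead: 64}
	b.fill()
	fmt.Printf("%#x %d %v\n", b.value, b.bitsRead, b.overread())
}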
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + 
MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 
104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ 
R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, 
CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP 
sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL 
-4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB 
copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS 
X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL 
-4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT 
·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + 
BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 
+ JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + 
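The decodeSync kernels come in plain and safe pairs: the amd64/bmi2 versions above copy literals and matches in whole 16-byte MOVUPS blocks, which can store a few bytes past the requested length and therefore rely on slack after the destination, while the *_safe_* kernels whose prologue begins here never write outside the exact range. A hedged Go sketch of that distinction (helper names are invented for illustration):

package sketch

// copyFast mirrors the copy_1/copy_2 loops of the non-safe kernels: the copy
// is rounded up to whole 16-byte blocks, so dst and src need at least
// (n+15)&^15 accessible bytes even though only the first n are meaningful.
func copyFast(dst, src []byte, n int) {
	for i := 0; i < n; i += 16 {
		copy(dst[i:i+16], src[i:i+16])
	}
}

// copySafe mirrors the *_safe_* kernels (copy_1_loop plus the copy_1_small
// tail cases): nothing outside dst[:n] is touched.
func copySafe(dst, src []byte, n int) {
	copy(dst[:n], src[:n])
}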
ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + 
MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 
16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + 
ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + 
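The copy_4_move_* blocks that follow handle short copies without a byte loop: two fixed-size loads whose stores overlap in the middle cover any length in the range. A minimal Go equivalent of the 8-through-16-byte case (function name assumed):

package sketch

import "encoding/binary"

// copy8through16 copies n bytes, 8 <= n <= 16, with two overlapping 8-byte
// loads and stores, the same trick as the MOVQ pairs in copy_4_move_8through16.
func copy8through16(dst, src []byte, n int) {
	head := binary.LittleEndian.Uint64(src)       // first 8 bytes
	tail := binary.LittleEndian.Uint64(src[n-8:]) // last 8 bytes, may overlap head
	binary.LittleEndian.PutUint64(dst, head)
	binary.LittleEndian.PutUint64(dst[n-8:], tail)
}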
+copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + 
MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 000000000..ac2a80d29 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 000000000..8014174a7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
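As a worked example of the mapping that llCode (defined next) and mlCode implement: lengths small enough to index the tables map directly, while larger ones collapse onto highBit plus a fixed delta. The sketch below is illustrative only and is not part of the library; the expected values follow from the tables and delta constants in this file.

// Illustrative only; this would need to live in package zstd to see the
// unexported helpers defined in this file.
func exampleLengthCodes() [3]uint8 {
	return [3]uint8{
		llCode(3),   // 3: litLength <= 63 is a direct llCodeTable lookup
		llCode(64),  // 25: highBit(64) = 6, plus llDeltaCode (19)
		mlCode(130), // 43: highBit(130) = 7, plus mlDeltaCode (36)
	}
}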
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 000000000..9e1baad73 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,435 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
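For reference, the framing-format chunk just read above decomposes into a 4-byte header (a type byte plus a 24-bit little-endian payload length), and for the data chunk types the payload itself starts with a 4-byte CRC of the uncompressed data before the body. A hedged sketch of the header split (helper name invented):

package sketch

// parseChunkHeader splits the 4-byte snappy framing header that the Convert
// loop reads into r.buf[:4]: byte 0 is the chunk type, bytes 1-3 the length.
func parseChunkHeader(hdr [4]byte) (chunkType byte, chunkLen int) {
	chunkType = hdr[0]
	chunkLen = int(hdr[1]) | int(hdr[2])<<8 | int(hdr[3])<<16
	return chunkType, chunkLen
}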
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
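+ // The framing format forbids skipping reserved unskippable chunks, so
+ // conversion aborts with ErrSnappyUnsupported rather than ignoring them.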
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 000000000..29c15c8c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 000000000..4be7cc736 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,121 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/kylelemons/godebug/LICENSE b/vendor/github.com/kylelemons/godebug/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/kylelemons/godebug/diff/diff.go b/vendor/github.com/kylelemons/godebug/diff/diff.go new file mode 100644 index 000000000..200e596c6 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/diff/diff.go @@ -0,0 +1,186 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package diff + +import ( + "bytes" + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. +// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. +func Diff(A, B string) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + + chunks := DiffChunks(aLines, bLines) + + buf := new(bytes.Buffer) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. +func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. + // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. + // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. 
Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. + for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! + if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/vendor/github.com/kylelemons/godebug/pretty/.gitignore b/vendor/github.com/kylelemons/godebug/pretty/.gitignore new file mode 100644 index 000000000..fa9a735da --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/.gitignore @@ -0,0 +1,5 @@ +*.test +*.bench +*.golden +*.txt +*.prof diff --git a/vendor/github.com/kylelemons/godebug/pretty/doc.go b/vendor/github.com/kylelemons/godebug/pretty/doc.go new file mode 100644 index 000000000..03b5718a7 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/doc.go @@ -0,0 +1,25 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pretty pretty-prints Go structures. 
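+//
+// A minimal usage sketch (illustrative; Print and Compare are defined later
+// in this package):
+//
+//	pretty.Print(value)
+//	if diff := pretty.Compare(got, want); diff != "" {
+//		t.Errorf("unexpected result (-got +want):\n%s", diff)
+//	}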
+// +// This package uses reflection to examine a Go value and can +// print out in a nice, aligned fashion. It supports three +// modes (normal, compact, and extended) for advanced use. +// +// See the Reflect and Print examples for what the output looks like. +package pretty + +// TODO: +// - Catch cycles diff --git a/vendor/github.com/kylelemons/godebug/pretty/public.go b/vendor/github.com/kylelemons/godebug/pretty/public.go new file mode 100644 index 000000000..fbc5d7abb --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/public.go @@ -0,0 +1,188 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "bytes" + "fmt" + "io" + "net" + "reflect" + "time" + + "github.com/kylelemons/godebug/diff" +) + +// A Config represents optional configuration parameters for formatting. +// +// Some options, notably ShortList, dramatically increase the overhead +// of pretty-printing a value. +type Config struct { + // Verbosity options + Compact bool // One-line output. Overrides Diffable. + Diffable bool // Adds extra newlines for more easily diffable output. + + // Field and value options + IncludeUnexported bool // Include unexported fields in output + PrintStringers bool // Call String on a fmt.Stringer + PrintTextMarshalers bool // Call MarshalText on an encoding.TextMarshaler + SkipZeroFields bool // Skip struct fields that have a zero value. + + // Output transforms + ShortList int // Maximum character length for short lists if nonzero. + + // Type-specific overrides + // + // Formatter maps a type to a function that will provide a one-line string + // representation of the input value. Conceptually: + // Formatter[reflect.TypeOf(v)](v) = "v as a string" + // + // Note that the first argument need not explicitly match the type, it must + // merely be callable with it. + // + // When processing an input value, if its type exists as a key in Formatter: + // 1) If the value is nil, no stringification is performed. + // This allows overriding of PrintStringers and PrintTextMarshalers. + // 2) The value will be called with the input as its only argument. + // The function must return a string as its first return value. + // + // In addition to func literals, two common values for this will be: + // fmt.Sprint (function) func Sprint(...interface{}) string + // Type.String (method) func (Type) String() string + // + // Note that neither of these work if the String method is a pointer + // method and the input will be provided as a value. In that case, + // use a function that calls .String on the formal value parameter. + Formatter map[reflect.Type]interface{} + + // If TrackCycles is enabled, pretty will detect and track + // self-referential structures. If a self-referential structure (aka a + // "recursive" value) is detected, numbered placeholders will be emitted. + // + // Pointer tracking is disabled by default for performance reasons. 
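+ //
+ // An illustrative sketch (not from the upstream docs):
+ //
+ //	type Node struct{ Next *Node }
+ //	n := &Node{}
+ //	n.Next = n
+ //	CycleTracker.Print(n) // prints the value once, then emits a numbered placeholder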
+ TrackCycles bool +} + +// Default Config objects +var ( + // DefaultFormatter is the default set of overrides for stringification. + DefaultFormatter = map[reflect.Type]interface{}{ + reflect.TypeOf(time.Time{}): fmt.Sprint, + reflect.TypeOf(net.IP{}): fmt.Sprint, + reflect.TypeOf((*error)(nil)).Elem(): fmt.Sprint, + } + + // CompareConfig is the default configuration used for Compare. + CompareConfig = &Config{ + Diffable: true, + IncludeUnexported: true, + Formatter: DefaultFormatter, + } + + // DefaultConfig is the default configuration used for all other top-level functions. + DefaultConfig = &Config{ + Formatter: DefaultFormatter, + } + + // CycleTracker is a convenience config for formatting and comparing recursive structures. + CycleTracker = &Config{ + Diffable: true, + Formatter: DefaultFormatter, + TrackCycles: true, + } +) + +func (cfg *Config) fprint(buf *bytes.Buffer, vals ...interface{}) { + ref := &reflector{ + Config: cfg, + } + if cfg.TrackCycles { + ref.pointerTracker = new(pointerTracker) + } + for i, val := range vals { + if i > 0 { + buf.WriteByte('\n') + } + newFormatter(cfg, buf).write(ref.val2node(reflect.ValueOf(val))) + } +} + +// Print writes the DefaultConfig representation of the given values to standard output. +func Print(vals ...interface{}) { + DefaultConfig.Print(vals...) +} + +// Print writes the configured presentation of the given values to standard output. +func (cfg *Config) Print(vals ...interface{}) { + fmt.Println(cfg.Sprint(vals...)) +} + +// Sprint returns a string representation of the given value according to the DefaultConfig. +func Sprint(vals ...interface{}) string { + return DefaultConfig.Sprint(vals...) +} + +// Sprint returns a string representation of the given value according to cfg. +func (cfg *Config) Sprint(vals ...interface{}) string { + buf := new(bytes.Buffer) + cfg.fprint(buf, vals...) + return buf.String() +} + +// Fprint writes the representation of the given value to the writer according to the DefaultConfig. +func Fprint(w io.Writer, vals ...interface{}) (n int64, err error) { + return DefaultConfig.Fprint(w, vals...) +} + +// Fprint writes the representation of the given value to the writer according to the cfg. +func (cfg *Config) Fprint(w io.Writer, vals ...interface{}) (n int64, err error) { + buf := new(bytes.Buffer) + cfg.fprint(buf, vals...) + return buf.WriteTo(w) +} + +// Compare returns a string containing a line-by-line unified diff of the +// values in a and b, using the CompareConfig. +// +// Each line in the output is prefixed with '+', '-', or ' ' to indicate which +// side it's from. Lines from the a side are marked with '-', lines from the +// b side are marked with '+' and lines that are the same on both sides are +// marked with ' '. +// +// The comparison is based on the intentionally-untyped output of Print, and as +// such this comparison is pretty forviving. In particular, if the types of or +// types within in a and b are different but have the same representation, +// Compare will not indicate any differences between them. +func Compare(a, b interface{}) string { + return CompareConfig.Compare(a, b) +} + +// Compare returns a string containing a line-by-line unified diff of the +// values in got and want according to the cfg. +// +// Each line in the output is prefixed with '+', '-', or ' ' to indicate which +// side it's from. Lines from the a side are marked with '-', lines from the +// b side are marked with '+' and lines that are the same on both sides are +// marked with ' '. 
+// +// The comparison is based on the intentionally-untyped output of Print, and as +// such this comparison is pretty forviving. In particular, if the types of or +// types within in a and b are different but have the same representation, +// Compare will not indicate any differences between them. +func (cfg *Config) Compare(a, b interface{}) string { + diffCfg := *cfg + diffCfg.Diffable = true + return diff.Diff(cfg.Sprint(a), cfg.Sprint(b)) +} diff --git a/vendor/github.com/kylelemons/godebug/pretty/reflect.go b/vendor/github.com/kylelemons/godebug/pretty/reflect.go new file mode 100644 index 000000000..5cd30b7f0 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/reflect.go @@ -0,0 +1,241 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "encoding" + "fmt" + "reflect" + "sort" +) + +func isZeroVal(val reflect.Value) bool { + if !val.CanInterface() { + return false + } + z := reflect.Zero(val.Type()).Interface() + return reflect.DeepEqual(val.Interface(), z) +} + +// pointerTracker is a helper for tracking pointer chasing to detect cycles. +type pointerTracker struct { + addrs map[uintptr]int // addr[address] = seen count + + lastID int + ids map[uintptr]int // ids[address] = id +} + +// track tracks following a reference (pointer, slice, map, etc). Every call to +// track should be paired with a call to untrack. +func (p *pointerTracker) track(ptr uintptr) { + if p.addrs == nil { + p.addrs = make(map[uintptr]int) + } + p.addrs[ptr]++ +} + +// untrack registers that we have backtracked over the reference to the pointer. +func (p *pointerTracker) untrack(ptr uintptr) { + p.addrs[ptr]-- + if p.addrs[ptr] == 0 { + delete(p.addrs, ptr) + } +} + +// seen returns whether the pointer was previously seen along this path. +func (p *pointerTracker) seen(ptr uintptr) bool { + _, ok := p.addrs[ptr] + return ok +} + +// keep allocates an ID for the given address and returns it. +func (p *pointerTracker) keep(ptr uintptr) int { + if p.ids == nil { + p.ids = make(map[uintptr]int) + } + if _, ok := p.ids[ptr]; !ok { + p.lastID++ + p.ids[ptr] = p.lastID + } + return p.ids[ptr] +} + +// id returns the ID for the given address. +func (p *pointerTracker) id(ptr uintptr) (int, bool) { + if p.ids == nil { + p.ids = make(map[uintptr]int) + } + id, ok := p.ids[ptr] + return id, ok +} + +// reflector adds local state to the recursive reflection logic. +type reflector struct { + *Config + *pointerTracker +} + +// follow handles following a possiblly-recursive reference to the given value +// from the given ptr address. 
+func (r *reflector) follow(ptr uintptr, val reflect.Value) node { + if r.pointerTracker == nil { + // Tracking disabled + return r.val2node(val) + } + + // If a parent already followed this, emit a reference marker + if r.seen(ptr) { + id := r.keep(ptr) + return ref{id} + } + + // Track the pointer we're following while on this recursive branch + r.track(ptr) + defer r.untrack(ptr) + n := r.val2node(val) + + // If the recursion used this ptr, wrap it with a target marker + if id, ok := r.id(ptr); ok { + return target{id, n} + } + + // Otherwise, return the node unadulterated + return n +} + +func (r *reflector) val2node(val reflect.Value) node { + if !val.IsValid() { + return rawVal("nil") + } + + if val.CanInterface() { + v := val.Interface() + if formatter, ok := r.Formatter[val.Type()]; ok { + if formatter != nil { + res := reflect.ValueOf(formatter).Call([]reflect.Value{val}) + return rawVal(res[0].Interface().(string)) + } + } else { + if s, ok := v.(fmt.Stringer); ok && r.PrintStringers { + return stringVal(s.String()) + } + if t, ok := v.(encoding.TextMarshaler); ok && r.PrintTextMarshalers { + if raw, err := t.MarshalText(); err == nil { // if NOT an error + return stringVal(string(raw)) + } + } + } + } + + switch kind := val.Kind(); kind { + case reflect.Ptr: + if val.IsNil() { + return rawVal("nil") + } + return r.follow(val.Pointer(), val.Elem()) + case reflect.Interface: + if val.IsNil() { + return rawVal("nil") + } + return r.val2node(val.Elem()) + case reflect.String: + return stringVal(val.String()) + case reflect.Slice: + n := list{} + length := val.Len() + ptr := val.Pointer() + for i := 0; i < length; i++ { + n = append(n, r.follow(ptr, val.Index(i))) + } + return n + case reflect.Array: + n := list{} + length := val.Len() + for i := 0; i < length; i++ { + n = append(n, r.val2node(val.Index(i))) + } + return n + case reflect.Map: + // Extract the keys and sort them for stable iteration + keys := val.MapKeys() + pairs := make([]mapPair, 0, len(keys)) + for _, key := range keys { + pairs = append(pairs, mapPair{ + key: new(formatter).compactString(r.val2node(key)), // can't be cyclic + value: val.MapIndex(key), + }) + } + sort.Sort(byKey(pairs)) + + // Process the keys into the final representation + ptr, n := val.Pointer(), keyvals{} + for _, pair := range pairs { + n = append(n, keyval{ + key: pair.key, + val: r.follow(ptr, pair.value), + }) + } + return n + case reflect.Struct: + n := keyvals{} + typ := val.Type() + fields := typ.NumField() + for i := 0; i < fields; i++ { + sf := typ.Field(i) + if !r.IncludeUnexported && sf.PkgPath != "" { + continue + } + field := val.Field(i) + if r.SkipZeroFields && isZeroVal(field) { + continue + } + n = append(n, keyval{sf.Name, r.val2node(field)}) + } + return n + case reflect.Bool: + if val.Bool() { + return rawVal("true") + } + return rawVal("false") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rawVal(fmt.Sprintf("%d", val.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rawVal(fmt.Sprintf("%d", val.Uint())) + case reflect.Uintptr: + return rawVal(fmt.Sprintf("0x%X", val.Uint())) + case reflect.Float32, reflect.Float64: + return rawVal(fmt.Sprintf("%v", val.Float())) + case reflect.Complex64, reflect.Complex128: + return rawVal(fmt.Sprintf("%v", val.Complex())) + } + + // Fall back to the default %#v if we can + if val.CanInterface() { + return rawVal(fmt.Sprintf("%#v", val.Interface())) + } + + return rawVal(val.String()) +} + +type 
mapPair struct { + key string + value reflect.Value +} + +type byKey []mapPair + +func (v byKey) Len() int { return len(v) } +func (v byKey) Swap(i, j int) { v[i], v[j] = v[j], v[i] } +func (v byKey) Less(i, j int) bool { return v[i].key < v[j].key } diff --git a/vendor/github.com/kylelemons/godebug/pretty/structure.go b/vendor/github.com/kylelemons/godebug/pretty/structure.go new file mode 100644 index 000000000..d876f60ca --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/structure.go @@ -0,0 +1,223 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// a formatter stores stateful formatting information as well as being +// an io.Writer for simplicity. +type formatter struct { + *bufio.Writer + *Config + + // Self-referential structure tracking + tagNumbers map[int]int // tagNumbers[id] = <#n> +} + +// newFormatter creates a new buffered formatter. For the output to be written +// to the given writer, this must be accompanied by a call to write (or Flush). +func newFormatter(cfg *Config, w io.Writer) *formatter { + return &formatter{ + Writer: bufio.NewWriter(w), + Config: cfg, + tagNumbers: make(map[int]int), + } +} + +func (f *formatter) write(n node) { + defer f.Flush() + n.format(f, "") +} + +func (f *formatter) tagFor(id int) int { + if tag, ok := f.tagNumbers[id]; ok { + return tag + } + if f.tagNumbers == nil { + return 0 + } + tag := len(f.tagNumbers) + 1 + f.tagNumbers[id] = tag + return tag +} + +type node interface { + format(f *formatter, indent string) +} + +func (f *formatter) compactString(n node) string { + switch k := n.(type) { + case stringVal: + return string(k) + case rawVal: + return string(k) + } + + buf := new(bytes.Buffer) + f2 := newFormatter(&Config{Compact: true}, buf) + f2.tagNumbers = f.tagNumbers // reuse tagNumbers just in case + f2.write(n) + return buf.String() +} + +type stringVal string + +func (str stringVal) format(f *formatter, indent string) { + f.WriteString(strconv.Quote(string(str))) +} + +type rawVal string + +func (r rawVal) format(f *formatter, indent string) { + f.WriteString(string(r)) +} + +type keyval struct { + key string + val node +} + +type keyvals []keyval + +func (l keyvals) format(f *formatter, indent string) { + f.WriteByte('{') + + switch { + case f.Compact: + // All on one line: + for i, kv := range l { + if i > 0 { + f.WriteByte(',') + } + f.WriteString(kv.key) + f.WriteByte(':') + kv.val.format(f, indent) + } + case f.Diffable: + f.WriteByte('\n') + inner := indent + " " + // Each value gets its own line: + for _, kv := range l { + f.WriteString(inner) + f.WriteString(kv.key) + f.WriteString(": ") + kv.val.format(f, inner) + f.WriteString(",\n") + } + f.WriteString(indent) + default: + keyWidth := 0 + for _, kv := range l { + if kw := len(kv.key); kw > keyWidth { + keyWidth = kw + } + } + alignKey := indent + " " + alignValue := strings.Repeat(" ", keyWidth) + 
inner := alignKey + alignValue + " " + // First and last line shared with bracket: + for i, kv := range l { + if i > 0 { + f.WriteString(",\n") + f.WriteString(alignKey) + } + f.WriteString(kv.key) + f.WriteString(": ") + f.WriteString(alignValue[len(kv.key):]) + kv.val.format(f, inner) + } + } + + f.WriteByte('}') +} + +type list []node + +func (l list) format(f *formatter, indent string) { + if max := f.ShortList; max > 0 { + short := f.compactString(l) + if len(short) <= max { + f.WriteString(short) + return + } + } + + f.WriteByte('[') + + switch { + case f.Compact: + // All on one line: + for i, v := range l { + if i > 0 { + f.WriteByte(',') + } + v.format(f, indent) + } + case f.Diffable: + f.WriteByte('\n') + inner := indent + " " + // Each value gets its own line: + for _, v := range l { + f.WriteString(inner) + v.format(f, inner) + f.WriteString(",\n") + } + f.WriteString(indent) + default: + inner := indent + " " + // First and last line shared with bracket: + for i, v := range l { + if i > 0 { + f.WriteString(",\n") + f.WriteString(inner) + } + v.format(f, inner) + } + } + + f.WriteByte(']') +} + +type ref struct { + id int +} + +func (r ref) format(f *formatter, indent string) { + fmt.Fprintf(f, "", f.tagFor(r.id)) +} + +type target struct { + id int + value node +} + +func (t target) format(f *formatter, indent string) { + tag := fmt.Sprintf("<#%d> ", f.tagFor(t.id)) + switch { + case f.Diffable, f.Compact: + // no indent changes + default: + indent += strings.Repeat(" ", len(tag)) + } + f.WriteString(tag) + t.value.format(f, indent) +} diff --git a/vendor/github.com/linode/linodego/.golangci.yml b/vendor/github.com/linode/linodego/.golangci.yml index 40983e5b0..abdc2337b 100644 --- a/vendor/github.com/linode/linodego/.golangci.yml +++ b/vendor/github.com/linode/linodego/.golangci.yml @@ -29,6 +29,13 @@ linters-settings: linters: enable-all: true disable: + # deprecated linters + - deadcode + - ifshort + - varcheck + - nosnakecase + #################### + - bodyclose - contextcheck - nilerr @@ -71,4 +78,6 @@ linters: - cyclop - godot - exhaustive + - depguard + - tagalign fast: false diff --git a/vendor/github.com/linode/linodego/CODEOWNERS b/vendor/github.com/linode/linodego/CODEOWNERS new file mode 100644 index 000000000..34864d6fc --- /dev/null +++ b/vendor/github.com/linode/linodego/CODEOWNERS @@ -0,0 +1 @@ +* @linode/dx diff --git a/vendor/github.com/linode/linodego/Makefile b/vendor/github.com/linode/linodego/Makefile index ebe4ecbc7..529a733e5 100644 --- a/vendor/github.com/linode/linodego/Makefile +++ b/vendor/github.com/linode/linodego/Makefile @@ -8,7 +8,7 @@ TEST_TIMEOUT := 5h SKIP_DOCKER ?= 0 GOLANGCILINT := golangci-lint -GOLANGCILINT_IMG := golangci/golangci-lint:v1.46.2-alpine +GOLANGCILINT_IMG := golangci/golangci-lint:latest GOLANGCILINT_ARGS := run LINODE_URL := https://api.linode.com/ @@ -29,6 +29,9 @@ testunit: testint: cd test && make test +smoketest: + cd test && make smoketest + build: vet lint go build ./... cd k8s && go build ./... 
diff --git a/vendor/github.com/linode/linodego/account_events.go b/vendor/github.com/linode/linodego/account_events.go index e8306a1ba..137324486 100644 --- a/vendor/github.com/linode/linodego/account_events.go +++ b/vendor/github.com/linode/linodego/account_events.go @@ -95,6 +95,7 @@ const ( ActionHostReboot EventAction = "host_reboot" ActionImageDelete EventAction = "image_delete" ActionImageUpdate EventAction = "image_update" + ActionImageUpload EventAction = "image_upload" ActionLassieReboot EventAction = "lassie_reboot" ActionLinodeAddIP EventAction = "linode_addip" ActionLinodeBoot EventAction = "linode_boot" diff --git a/vendor/github.com/linode/linodego/account_oauth_client.go b/vendor/github.com/linode/linodego/account_oauth_client.go index 1a43b6083..dba898d29 100644 --- a/vendor/github.com/linode/linodego/account_oauth_client.go +++ b/vendor/github.com/linode/linodego/account_oauth_client.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -119,6 +120,7 @@ func (c *Client) ListOAuthClients(ctx context.Context, opts *ListOptions) ([]OAu // GetOAuthClient gets the OAuthClient with the provided ID func (c *Client) GetOAuthClient(ctx context.Context, clientID string) (*OAuthClient, error) { req := c.R(ctx).SetResult(&OAuthClient{}) + clientID = url.PathEscape(clientID) e := fmt.Sprintf("account/oauth-clients/%s", clientID) r, err := coupleAPIErrors(req.Get(e)) if err != nil { @@ -153,6 +155,9 @@ func (c *Client) UpdateOAuthClient(ctx context.Context, clientID string, opts OA } req := c.R(ctx).SetResult(&OAuthClient{}).SetBody(string(body)) + + clientID = url.PathEscape(clientID) + e := fmt.Sprintf("account/oauth-clients/%s", clientID) r, err := coupleAPIErrors(req.Put(e)) if err != nil { @@ -164,6 +169,7 @@ func (c *Client) UpdateOAuthClient(ctx context.Context, clientID string, opts OA // DeleteOAuthClient deletes the OAuthClient with the specified id func (c *Client) DeleteOAuthClient(ctx context.Context, clientID string) error { + clientID = url.PathEscape(clientID) e := fmt.Sprintf("account/oauth-clients/%s", clientID) _, err := coupleAPIErrors(c.R(ctx).Delete(e)) return err diff --git a/vendor/github.com/linode/linodego/account_settings.go b/vendor/github.com/linode/linodego/account_settings.go index bf60162d0..9a4b1362b 100644 --- a/vendor/github.com/linode/linodego/account_settings.go +++ b/vendor/github.com/linode/linodego/account_settings.go @@ -29,6 +29,7 @@ type AccountSettingsUpdateOptions struct { BackupsEnabled *bool `json:"backups_enabled,omitempty"` // A plan name like "longview-3"..."longview-100", or a nil value for to cancel any existing subscription plan. + // Deprecated: Use PUT /longview/plan instead to update the LongviewSubscription LongviewSubscription *string `json:"longview_subscription,omitempty"` // The default network helper setting for all new Linodes and Linode Configs for all users on the account. 
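The linodego hunks in this patch repeatedly apply the same hardening pattern: caller-supplied identifiers (OAuth client IDs, usernames, user IDs) are passed through url.PathEscape before being interpolated into request paths, so an identifier containing '/' or '?' cannot change which endpoint is addressed. A minimal sketch of the effect, using a hypothetical identifier and only the standard library:

	id := "abc/../users"
	fmt.Println(url.PathEscape(id)) // "abc%2F..%2Fusers" - remains a single path segment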
diff --git a/vendor/github.com/linode/linodego/account_user_grants.go b/vendor/github.com/linode/linodego/account_user_grants.go index 33b8c15ea..221fb5f7b 100644 --- a/vendor/github.com/linode/linodego/account_user_grants.go +++ b/vendor/github.com/linode/linodego/account_user_grants.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" ) type GrantPermissionLevel string @@ -68,6 +69,7 @@ type UserGrantsUpdateOptions struct { } func (c *Client) GetUserGrants(ctx context.Context, username string) (*UserGrants, error) { + username = url.PathEscape(username) e := fmt.Sprintf("account/users/%s/grants", username) req := c.R(ctx).SetResult(&UserGrants{}) r, err := coupleAPIErrors(req.Get(e)) @@ -84,6 +86,7 @@ func (c *Client) UpdateUserGrants(ctx context.Context, username string, opts Use return nil, err } + username = url.PathEscape(username) e := fmt.Sprintf("account/users/%s/grants", username) req := c.R(ctx).SetResult(&UserGrants{}).SetBody(string(body)) r, err := coupleAPIErrors(req.Put(e)) diff --git a/vendor/github.com/linode/linodego/account_users.go b/vendor/github.com/linode/linodego/account_users.go index 4130163e1..d9633d881 100644 --- a/vendor/github.com/linode/linodego/account_users.go +++ b/vendor/github.com/linode/linodego/account_users.go @@ -4,17 +4,22 @@ import ( "context" "encoding/json" "fmt" + "net/url" + "time" "github.com/go-resty/resty/v2" + "github.com/linode/linodego/internal/parseabletime" ) // User represents a User object type User struct { - Username string `json:"username"` - Email string `json:"email"` - Restricted bool `json:"restricted"` - TFAEnabled bool `json:"tfa_enabled"` - SSHKeys []string `json:"ssh_keys"` + Username string `json:"username"` + Email string `json:"email"` + Restricted bool `json:"restricted"` + TFAEnabled bool `json:"tfa_enabled"` + SSHKeys []string `json:"ssh_keys"` + PasswordCreated *time.Time `json:"-"` + VerifiedPhoneNumber *string `json:"verified_phone_number"` } // UserCreateOptions fields are those accepted by CreateUser @@ -30,6 +35,26 @@ type UserUpdateOptions struct { Restricted *bool `json:"restricted,omitempty"` } +// UnmarshalJSON implements the json.Unmarshaler interface +func (i *User) UnmarshalJSON(b []byte) error { + type Mask User + + p := struct { + *Mask + PasswordCreated *parseabletime.ParseableTime `json:"password_created"` + }{ + Mask: (*Mask)(i), + } + + if err := json.Unmarshal(b, &p); err != nil { + return err + } + + i.PasswordCreated = (*time.Time)(p.PasswordCreated) + + return nil +} + // GetCreateOptions converts a User to UserCreateOptions for use in CreateUser func (i User) GetCreateOptions() (o UserCreateOptions) { o.Username = i.Username @@ -81,6 +106,7 @@ func (c *Client) ListUsers(ctx context.Context, opts *ListOptions) ([]User, erro // GetUser gets the user with the provided ID func (c *Client) GetUser(ctx context.Context, userID string) (*User, error) { + userID = url.PathEscape(userID) e := fmt.Sprintf("account/users/%s", userID) req := c.R(ctx).SetResult(&User{}) r, err := coupleAPIErrors(req.Get(e)) @@ -116,6 +142,7 @@ func (c *Client) UpdateUser(ctx context.Context, userID string, opts UserUpdateO return nil, err } + userID = url.PathEscape(userID) e := fmt.Sprintf("account/users/%s", userID) req := c.R(ctx).SetResult(&User{}).SetBody(string(body)) r, err := coupleAPIErrors(req.Put(e)) @@ -128,6 +155,7 @@ func (c *Client) UpdateUser(ctx context.Context, userID string, opts UserUpdateO // DeleteUser deletes the User with the specified id func (c *Client) DeleteUser(ctx 
context.Context, userID string) error { + userID = url.PathEscape(userID) e := fmt.Sprintf("account/users/%s", userID) _, err := coupleAPIErrors(c.R(ctx).Delete(e)) return err diff --git a/vendor/github.com/linode/linodego/client.go b/vendor/github.com/linode/linodego/client.go index bb992ea12..5262c8485 100644 --- a/vendor/github.com/linode/linodego/client.go +++ b/vendor/github.com/linode/linodego/client.go @@ -52,7 +52,7 @@ type Client struct { debug bool retryConditionals []RetryConditional - millisecondsPerPoll time.Duration + pollInterval time.Duration baseURL string apiVersion string @@ -166,7 +166,14 @@ func (c *Client) updateHostURL() { apiProto = c.apiProto } - c.resty.SetHostURL(fmt.Sprintf("%s://%s/%s", apiProto, baseURL, apiVersion)) + c.resty.SetHostURL( + fmt.Sprintf( + "%s://%s/%s", + apiProto, + baseURL, + url.PathEscape(apiVersion), + ), + ) } // SetRootCertificate adds a root certificate to the underlying TLS client config @@ -344,14 +351,21 @@ func (c *Client) SetRetryCount(count int) *Client { // SetPollDelay sets the number of milliseconds to wait between events or status polls. // Affects all WaitFor* functions and retries. func (c *Client) SetPollDelay(delay time.Duration) *Client { - c.millisecondsPerPoll = delay + c.pollInterval = delay return c } // GetPollDelay gets the number of milliseconds to wait between events or status polls. // Affects all WaitFor* functions and retries. func (c *Client) GetPollDelay() time.Duration { - return c.millisecondsPerPoll + return c.pollInterval +} + +// SetHeader sets a custom header to be used in all API requests made with the current +// client. +// NOTE: Some headers may be overridden by the individual request functions. +func (c *Client) SetHeader(name, value string) { + c.resty.SetHeader(name, value) } // NewClient factory to create new Client struct @@ -398,7 +412,7 @@ func NewClient(hc *http.Client) (client Client) { client. SetRetryWaitTime((1000 * APISecondsPerPoll) * time.Millisecond). - SetPollDelay(1000 * APISecondsPerPoll). + SetPollDelay(APISecondsPerPoll * time.Second). SetRetries(). SetDebug(envDebug) @@ -514,12 +528,12 @@ func copyTime(tPtr *time.Time) *time.Time { func generateListCacheURL(endpoint string, opts *ListOptions) (string, error) { if opts == nil { - return "", nil + return endpoint, nil } hashedOpts, err := opts.Hash() if err != nil { - return "", err + return endpoint, err } return fmt.Sprintf("%s:%s", endpoint, hashedOpts), nil diff --git a/vendor/github.com/linode/linodego/databases.go b/vendor/github.com/linode/linodego/databases.go index eace5fdc9..5a21aae7d 100644 --- a/vendor/github.com/linode/linodego/databases.go +++ b/vendor/github.com/linode/linodego/databases.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" "time" "github.com/go-resty/resty/v2" @@ -34,7 +35,6 @@ const ( const ( DatabaseEngineTypeMySQL DatabaseEngineType = "mysql" - DatabaseEngineTypeMongo DatabaseEngineType = "mongodb" DatabaseEngineTypePostgres DatabaseEngineType = "postgresql" ) @@ -236,7 +236,8 @@ func (c *Client) ListDatabaseEngines(ctx context.Context, opts *ListOptions) ([] } // GetDatabaseEngine returns a specific Database Engine. This endpoint is cached by default. 
-func (c *Client) GetDatabaseEngine(ctx context.Context, opts *ListOptions, engineID string) (*DatabaseEngine, error) { +func (c *Client) GetDatabaseEngine(ctx context.Context, _ *ListOptions, engineID string) (*DatabaseEngine, error) { + engineID = url.PathEscape(engineID) e := fmt.Sprintf("databases/engines/%s", engineID) if result := c.getCachedResponse(e); result != nil { @@ -279,7 +280,8 @@ func (c *Client) ListDatabaseTypes(ctx context.Context, opts *ListOptions) ([]Da } // GetDatabaseType returns a specific Database Type. This endpoint is cached by default. -func (c *Client) GetDatabaseType(ctx context.Context, opts *ListOptions, typeID string) (*DatabaseType, error) { +func (c *Client) GetDatabaseType(ctx context.Context, _ *ListOptions, typeID string) (*DatabaseType, error) { + typeID = url.PathEscape(typeID) e := fmt.Sprintf("databases/types/%s", typeID) if result := c.getCachedResponse(e); result != nil { diff --git a/vendor/github.com/linode/linodego/go.work.sum b/vendor/github.com/linode/linodego/go.work.sum index ea20c135a..2b28331a5 100644 --- a/vendor/github.com/linode/linodego/go.work.sum +++ b/vendor/github.com/linode/linodego/go.work.sum @@ -1,70 +1,166 @@ +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell 
v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= +github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= +github.com/getkin/kin-openapi v0.76.0 h1:j77zg3Ec+k+r+GA3d8hBoXpAc6KX9TbBPrwQGBIy2sY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/gogo/protobuf v1.3.1/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo= +github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/linode/linodego v0.20.1 h1:Kw5Qes0E0wlKVx5EbITI+F/ambO6G+PQyK0Yi7i4EyQ= github.com/linode/linodego v0.20.1/go.mod h1:XOWXRHjqeU2uPS84tKLgfWIfTlv3TYzCS0io4GOQzEI= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/maxatome/go-testdeep v1.11.0/go.mod h1:011SgQ6efzZYAen6fDn4BqQ+lUR72ysdyKe7Dyogw70= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
+github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5 h1:8Q0qkMVC/MmWkpIdlvZgcv2o2jrlF6zqVOh7W5YHdMA= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +google.golang.org/api v0.43.0 h1:4sAyIHT6ZohtAQDoxws+ez7bROYmUlOVvsUscYCDTqA= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1 h1:E7wSQBXkH3T3diucK+9Z1kjn4+/9tNG7lZLr75oOhh8= +google.golang.org/grpc v1.36.1 h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= +rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= +rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/github.com/linode/linodego/images.go b/vendor/github.com/linode/linodego/images.go index 46efc59db..d9e29f49e 100644 --- a/vendor/github.com/linode/linodego/images.go +++ b/vendor/github.com/linode/linodego/images.go @@ -5,6 +5,7 
@@ import ( "encoding/json" "fmt" "io" + "net/url" "time" "github.com/go-resty/resty/v2" @@ -23,18 +24,19 @@ const ( // Image represents a deployable Image object for use with Linode Instances type Image struct { - ID string `json:"id"` - CreatedBy string `json:"created_by"` - Label string `json:"label"` - Description string `json:"description"` - Type string `json:"type"` - Vendor string `json:"vendor"` - Status ImageStatus `json:"status"` - Size int `json:"size"` - IsPublic bool `json:"is_public"` - Deprecated bool `json:"deprecated"` - Created *time.Time `json:"-"` - Expiry *time.Time `json:"-"` + ID string `json:"id"` + CreatedBy string `json:"created_by"` + Capabilities []string `json:"capabilities"` + Label string `json:"label"` + Description string `json:"description"` + Type string `json:"type"` + Vendor string `json:"vendor"` + Status ImageStatus `json:"status"` + Size int `json:"size"` + IsPublic bool `json:"is_public"` + Deprecated bool `json:"deprecated"` + Created *time.Time `json:"-"` + Expiry *time.Time `json:"-"` } // ImageCreateOptions fields are those accepted by CreateImage @@ -42,6 +44,7 @@ type ImageCreateOptions struct { DiskID int `json:"disk_id"` Label string `json:"label"` Description string `json:"description,omitempty"` + CloudInit bool `json:"cloud_init,omitempty"` } // ImageUpdateOptions fields are those accepted by UpdateImage @@ -61,6 +64,7 @@ type ImageCreateUploadOptions struct { Region string `json:"region"` Label string `json:"label"` Description string `json:"description,omitempty"` + CloudInit bool `json:"cloud_init,omitempty"` } // ImageUploadOptions fields are those accepted by UploadImage @@ -68,6 +72,7 @@ type ImageUploadOptions struct { Region string `json:"region"` Label string `json:"label"` Description string `json:"description,omitempty"` + CloudInit bool `json:"cloud_init"` Image io.Reader } @@ -132,6 +137,8 @@ func (c *Client) ListImages(ctx context.Context, opts *ListOptions) ([]Image, er // GetImage gets the Image with the provided ID func (c *Client) GetImage(ctx context.Context, imageID string) (*Image, error) { + imageID = url.PathEscape(imageID) + e := fmt.Sprintf("images/%s", imageID) req := c.R(ctx).SetResult(&Image{}) r, err := coupleAPIErrors(req.Get(e)) @@ -164,6 +171,8 @@ func (c *Client) UpdateImage(ctx context.Context, imageID string, opts ImageUpda return nil, err } + imageID = url.PathEscape(imageID) + e := fmt.Sprintf("images/%s", imageID) req := c.R(ctx).SetResult(&Image{}).SetBody(string(body)) r, err := coupleAPIErrors(req.Put(e)) @@ -175,6 +184,7 @@ func (c *Client) UpdateImage(ctx context.Context, imageID string, opts ImageUpda // DeleteImage deletes the Image with the specified id func (c *Client) DeleteImage(ctx context.Context, imageID string) error { + imageID = url.PathEscape(imageID) e := fmt.Sprintf("images/%s", imageID) _, err := coupleAPIErrors(c.R(ctx).Delete(e)) return err @@ -223,6 +233,7 @@ func (c *Client) UploadImage(ctx context.Context, opts ImageUploadOptions) (*Ima Label: opts.Label, Region: opts.Region, Description: opts.Description, + CloudInit: opts.CloudInit, }) if err != nil { return nil, err diff --git a/vendor/github.com/linode/linodego/instance_ips.go b/vendor/github.com/linode/linodego/instance_ips.go index 3e03310d5..376142a99 100644 --- a/vendor/github.com/linode/linodego/instance_ips.go +++ b/vendor/github.com/linode/linodego/instance_ips.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" ) // InstanceIPAddressResponse contains the IPv4 and IPv6 details for an Instance 
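The images.go changes above add a CloudInit flag to the image create and upload option structs and a Capabilities field to Image. A hedged usage sketch of the new flag, based only on the struct fields and function names visible in that diff (region, label, and file name are illustrative; authentication and error handling are omitted):

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/linode/linodego"
)

func main() {
	f, err := os.Open("disk.img.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// An authenticated *http.Client would normally be passed here;
	// nil keeps the sketch short.
	client := linodego.NewClient(nil)

	image, err := client.UploadImage(context.Background(), linodego.ImageUploadOptions{
		Region:    "us-east",
		Label:     "cloud-init-enabled-image",
		CloudInit: true, // marks the uploaded image as cloud-init capable
		Image:     f,
	})
	fmt.Println(image, err)
}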
@@ -77,6 +78,7 @@ func (c *Client) GetInstanceIPAddresses(ctx context.Context, linodeID int) (*Ins // GetInstanceIPAddress gets the IPAddress for a Linode instance matching a supplied IP address func (c *Client) GetInstanceIPAddress(ctx context.Context, linodeID int, ipaddress string) (*InstanceIP, error) { + ipaddress = url.PathEscape(ipaddress) e := fmt.Sprintf("linode/instances/%d/ips/%s", linodeID, ipaddress) req := c.R(ctx).SetResult(&InstanceIP{}) r, err := coupleAPIErrors(req.Get(e)) @@ -116,6 +118,8 @@ func (c *Client) UpdateInstanceIPAddress(ctx context.Context, linodeID int, ipAd return nil, err } + ipAddress = url.PathEscape(ipAddress) + e := fmt.Sprintf("linode/instances/%d/ips/%s", linodeID, ipAddress) req := c.R(ctx).SetResult(&InstanceIP{}).SetBody(string(body)) r, err := coupleAPIErrors(req.Put(e)) @@ -126,6 +130,7 @@ func (c *Client) UpdateInstanceIPAddress(ctx context.Context, linodeID int, ipAd } func (c *Client) DeleteInstanceIPAddress(ctx context.Context, linodeID int, ipAddress string) error { + ipAddress = url.PathEscape(ipAddress) e := fmt.Sprintf("linode/instances/%d/ips/%s", linodeID, ipAddress) _, err := coupleAPIErrors(c.R(ctx).Delete(e)) return err diff --git a/vendor/github.com/linode/linodego/instances.go b/vendor/github.com/linode/linodego/instances.go index 7d6c7566e..1a0de9ea4 100644 --- a/vendor/github.com/linode/linodego/instances.go +++ b/vendor/github.com/linode/linodego/instances.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "net" + "net/url" "time" "github.com/go-resty/resty/v2" @@ -49,6 +50,7 @@ type Instance struct { Label string `json:"label"` Type string `json:"type"` Status InstanceStatus `json:"status"` + HasUserData bool `json:"has_user_data"` Hypervisor string `json:"hypervisor"` HostUUID string `json:"host_uuid"` Specs *InstanceSpec `json:"specs"` @@ -95,6 +97,13 @@ type InstanceTransfer struct { Quota int `json:"quota"` } +// InstanceMetadataOptions specifies various Instance creation fields +// that relate to the Linode Metadata service. 
+type InstanceMetadataOptions struct { + // UserData expects a Base64-encoded string + UserData string `json:"user_data,omitempty"` +} + // InstanceCreateOptions require only Region and Type type InstanceCreateOptions struct { Region string `json:"region"` @@ -112,6 +121,7 @@ type InstanceCreateOptions struct { BackupsEnabled bool `json:"backups_enabled,omitempty"` PrivateIP bool `json:"private_ip,omitempty"` Tags []string `json:"tags,omitempty"` + Metadata *InstanceMetadataOptions `json:"metadata,omitempty"` // Creation fields that need to be set explicitly false, "", or 0 use pointers SwapSize *int `json:"swap_size,omitempty"` @@ -168,13 +178,14 @@ type InstanceCloneOptions struct { Type string `json:"type,omitempty"` // LinodeID is an optional existing instance to use as the target of the clone - LinodeID int `json:"linode_id,omitempty"` - Label string `json:"label,omitempty"` - Group string `json:"group,omitempty"` - BackupsEnabled bool `json:"backups_enabled"` - Disks []int `json:"disks,omitempty"` - Configs []int `json:"configs,omitempty"` - PrivateIP bool `json:"private_ip,omitempty"` + LinodeID int `json:"linode_id,omitempty"` + Label string `json:"label,omitempty"` + Group string `json:"group,omitempty"` + BackupsEnabled bool `json:"backups_enabled"` + Disks []int `json:"disks,omitempty"` + Configs []int `json:"configs,omitempty"` + PrivateIP bool `json:"private_ip,omitempty"` + Metadata *InstanceMetadataOptions `json:"metadata,omitempty"` } // InstanceResizeOptions is an options struct used when resizing an instance @@ -336,13 +347,14 @@ func (c *Client) RebootInstance(ctx context.Context, linodeID int, configID int) // InstanceRebuildOptions is a struct representing the options to send to the rebuild linode endpoint type InstanceRebuildOptions struct { - Image string `json:"image,omitempty"` - RootPass string `json:"root_pass,omitempty"` - AuthorizedKeys []string `json:"authorized_keys,omitempty"` - AuthorizedUsers []string `json:"authorized_users,omitempty"` - StackScriptID int `json:"stackscript_id,omitempty"` - StackScriptData map[string]string `json:"stackscript_data,omitempty"` - Booted *bool `json:"booted,omitempty"` + Image string `json:"image,omitempty"` + RootPass string `json:"root_pass,omitempty"` + AuthorizedKeys []string `json:"authorized_keys,omitempty"` + AuthorizedUsers []string `json:"authorized_users,omitempty"` + StackScriptID int `json:"stackscript_id,omitempty"` + StackScriptData map[string]string `json:"stackscript_data,omitempty"` + Booted *bool `json:"booted,omitempty"` + Metadata *InstanceMetadataOptions `json:"metadata,omitempty"` } // RebuildInstance Deletes all Disks and Configs on this Linode, @@ -409,6 +421,7 @@ func (c *Client) MigrateInstance(ctx context.Context, id int) error { // simpleInstanceAction is a helper for Instance actions that take no parameters // and return empty responses `{}` unless they return a standard error func (c *Client) simpleInstanceAction(ctx context.Context, action string, linodeID int) error { + action = url.PathEscape(action) e := fmt.Sprintf("linode/instances/%d/%s", linodeID, action) _, err := coupleAPIErrors(c.R(ctx).Post(e)) return err diff --git a/vendor/github.com/linode/linodego/kernels.go b/vendor/github.com/linode/linodego/kernels.go index 06c2fc600..0c4246f5f 100644 --- a/vendor/github.com/linode/linodego/kernels.go +++ b/vendor/github.com/linode/linodego/kernels.go @@ -3,6 +3,7 @@ package linodego import ( "context" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -64,6 +65,7 @@ func (c *Client) 
ListKernels(ctx context.Context, opts *ListOptions) ([]LinodeKe // GetKernel gets the kernel with the provided ID. This endpoint is cached by default. func (c *Client) GetKernel(ctx context.Context, kernelID string) (*LinodeKernel, error) { + kernelID = url.PathEscape(kernelID) e := fmt.Sprintf("linode/kernels/%s", kernelID) if result := c.getCachedResponse(e); result != nil { diff --git a/vendor/github.com/linode/linodego/lke_clusters.go b/vendor/github.com/linode/linodego/lke_clusters.go index f764c7c15..ff9b67659 100644 --- a/vendor/github.com/linode/linodego/lke_clusters.go +++ b/vendor/github.com/linode/linodego/lke_clusters.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" "time" "github.com/go-resty/resty/v2" @@ -169,6 +170,7 @@ func (c *Client) ListLKEVersions(ctx context.Context, opts *ListOptions) ([]LKEV // GetLKEVersion gets details about a specific LKE Version. This endpoint is cached by default. func (c *Client) GetLKEVersion(ctx context.Context, version string) (*LKEVersion, error) { + version = url.PathEscape(version) e := fmt.Sprintf("lke/versions/%s", version) if result := c.getCachedResponse(e); result != nil { diff --git a/vendor/github.com/linode/linodego/lke_node_pools.go b/vendor/github.com/linode/linodego/lke_node_pools.go index 42097f13d..b7e5bc0bb 100644 --- a/vendor/github.com/linode/linodego/lke_node_pools.go +++ b/vendor/github.com/linode/linodego/lke_node_pools.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -170,6 +171,7 @@ func (c *Client) DeleteLKENodePool(ctx context.Context, clusterID, poolID int) e // DeleteLKENodePoolNode deletes a given node from a node pool func (c *Client) DeleteLKENodePoolNode(ctx context.Context, clusterID int, nodeID string) error { + nodeID = url.PathEscape(nodeID) e := fmt.Sprintf("lke/clusters/%d/nodes/%s", clusterID, nodeID) _, err := coupleAPIErrors(c.R(ctx).Delete(e)) return err diff --git a/vendor/github.com/linode/linodego/longview_subscriptions.go b/vendor/github.com/linode/linodego/longview_subscriptions.go index 8b5c4e126..efb3ee126 100644 --- a/vendor/github.com/linode/linodego/longview_subscriptions.go +++ b/vendor/github.com/linode/linodego/longview_subscriptions.go @@ -3,6 +3,7 @@ package linodego import ( "context" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -50,6 +51,7 @@ func (c *Client) ListLongviewSubscriptions(ctx context.Context, opts *ListOption // GetLongviewSubscription gets the template with the provided ID func (c *Client) GetLongviewSubscription(ctx context.Context, templateID string) (*LongviewSubscription, error) { + templateID = url.PathEscape(templateID) e := fmt.Sprintf("longview/subscriptions/%s", templateID) req := c.R(ctx).SetResult(&LongviewSubscription{}) r, err := coupleAPIErrors(req.Get(e)) diff --git a/vendor/github.com/linode/linodego/mongo.go b/vendor/github.com/linode/linodego/mongo.go deleted file mode 100644 index 47b83fdc0..000000000 --- a/vendor/github.com/linode/linodego/mongo.go +++ /dev/null @@ -1,324 +0,0 @@ -package linodego - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/go-resty/resty/v2" - "github.com/linode/linodego/internal/parseabletime" -) - -type MongoDatabaseTarget string - -const ( - MongoDatabaseTargetPrimary MongoDatabaseTarget = "primary" - MongoDatabaseTargetSecondary MongoDatabaseTarget = "secondary" -) - -type MongoCompressionType string - -const ( - MongoCompressionNone MongoCompressionType = "none" - MongoCompressionSnappy MongoCompressionType 
= "snappy" - MongoCompressionZlib MongoCompressionType = "zlib" -) - -type MongoStorageEngine string - -const ( - MongoStorageWiredTiger MongoStorageEngine = "wiredtiger" - MongoStorageMmapv1 MongoStorageEngine = "mmapv1" -) - -// A MongoDatabase is a instance of Linode Mongo Managed Databases -type MongoDatabase struct { - ID int `json:"id"` - Status DatabaseStatus `json:"status"` - Label string `json:"label"` - Region string `json:"region"` - Type string `json:"type"` - Engine string `json:"engine"` - Version string `json:"version"` - Encrypted bool `json:"encrypted"` - AllowList []string `json:"allow_list"` - Peers []string `json:"peers"` - Port int `json:"port"` - ReplicaSet string `json:"replica_set"` - SSLConnection bool `json:"ssl_connection"` - ClusterSize int `json:"cluster_size"` - Hosts DatabaseHost `json:"hosts"` - CompressionType MongoCompressionType `json:"compression_type"` - StorageEngine MongoStorageEngine `json:"storage_engine"` - Updates DatabaseMaintenanceWindow `json:"updates"` - Created *time.Time `json:"-"` - Updated *time.Time `json:"-"` -} - -func (d *MongoDatabase) UnmarshalJSON(b []byte) error { - type Mask MongoDatabase - - p := struct { - *Mask - Created *parseabletime.ParseableTime `json:"created"` - Updated *parseabletime.ParseableTime `json:"updated"` - }{ - Mask: (*Mask)(d), - } - - if err := json.Unmarshal(b, &p); err != nil { - return err - } - - d.Created = (*time.Time)(p.Created) - d.Updated = (*time.Time)(p.Updated) - return nil -} - -// MongoCreateOptions fields are used when creating a new Mongo Database -type MongoCreateOptions struct { - Label string `json:"label"` - Region string `json:"region"` - Type string `json:"type"` - Engine string `json:"engine"` - AllowList []string `json:"allow_list,omitempty"` - ClusterSize int `json:"cluster_size,omitempty"` - Encrypted bool `json:"encrypted,omitempty"` - SSLConnection bool `json:"ssl_connection,omitempty"` - CompressionType MongoCompressionType `json:"compression_type,omitempty"` - StorageEngine MongoStorageEngine `json:"storage_engine,omitempty"` -} - -// MongoUpdateOptions fields are used when altering the existing Mongo Database -type MongoUpdateOptions struct { - Label string `json:"label,omitempty"` - AllowList *[]string `json:"allow_list,omitempty"` - Updates *DatabaseMaintenanceWindow `json:"updates,omitempty"` -} - -// MongoDatabaseSSL is the SSL Certificate to access the Linode Managed Mongo Database -type MongoDatabaseSSL struct { - CACertificate []byte `json:"ca_certificate"` -} - -// MongoDatabaseCredential is the Root Credentials to access the Linode Managed Database -type MongoDatabaseCredential struct { - Username string `json:"username"` - Password string `json:"password"` -} - -type MongoDatabasesPagedResponse struct { - *PageOptions - Data []MongoDatabase `json:"data"` -} - -func (MongoDatabasesPagedResponse) endpoint(_ ...any) string { - return "databases/mongodb/instances" -} - -func (resp *MongoDatabasesPagedResponse) castResult(r *resty.Request, e string) (int, int, error) { - res, err := coupleAPIErrors(r.SetResult(MongoDatabasesPagedResponse{}).Get(e)) - if err != nil { - return 0, 0, err - } - castedRes := res.Result().(*MongoDatabasesPagedResponse) - resp.Data = append(resp.Data, castedRes.Data...) 
- return castedRes.Pages, castedRes.Results, nil -} - -// ListMongoDatabases lists all Mongo Databases associated with the account -func (c *Client) ListMongoDatabases(ctx context.Context, opts *ListOptions) ([]MongoDatabase, error) { - response := MongoDatabasesPagedResponse{} - - err := c.listHelper(ctx, &response, opts) - if err != nil { - return nil, err - } - - return response.Data, nil -} - -// MongoDatabaseBackup is information for interacting with a backup for the existing Mongo Database -type MongoDatabaseBackup struct { - ID int `json:"id"` - Label string `json:"label"` - Type string `json:"type"` - Created *time.Time `json:"-"` -} - -func (d *MongoDatabaseBackup) UnmarshalJSON(b []byte) error { - type Mask MongoDatabaseBackup - - p := struct { - *Mask - Created *parseabletime.ParseableTime `json:"created"` - }{ - Mask: (*Mask)(d), - } - - if err := json.Unmarshal(b, &p); err != nil { - return err - } - - d.Created = (*time.Time)(p.Created) - return nil -} - -// MongoBackupCreateOptions are options used for CreateMongoDatabaseBackup(...) -type MongoBackupCreateOptions struct { - Label string `json:"label"` - Target MongoDatabaseTarget `json:"target"` -} - -type MongoDatabaseBackupsPagedResponse struct { - *PageOptions - Data []MongoDatabaseBackup `json:"data"` -} - -func (MongoDatabaseBackupsPagedResponse) endpoint(ids ...any) string { - id := ids[0].(int) - return fmt.Sprintf("databases/mongodb/instances/%d/backups", id) -} - -func (resp *MongoDatabaseBackupsPagedResponse) castResult(r *resty.Request, e string) (int, int, error) { - res, err := coupleAPIErrors(r.SetResult(MongoDatabaseBackupsPagedResponse{}).Get(e)) - if err != nil { - return 0, 0, err - } - castedRes := res.Result().(*MongoDatabaseBackupsPagedResponse) - resp.Data = append(resp.Data, castedRes.Data...) 
- return castedRes.Pages, castedRes.Results, nil -} - -// ListMongoDatabaseBackups lists all Mongo Database Backups associated with the given Mongo Database -func (c *Client) ListMongoDatabaseBackups(ctx context.Context, databaseID int, opts *ListOptions) ([]MongoDatabaseBackup, error) { - response := MongoDatabaseBackupsPagedResponse{} - - err := c.listHelper(ctx, &response, opts, databaseID) - if err != nil { - return nil, err - } - - return response.Data, nil -} - -// GetMongoDatabase returns a single Mongo Database matching the id -func (c *Client) GetMongoDatabase(ctx context.Context, databaseID int) (*MongoDatabase, error) { - e := fmt.Sprintf("databases/mongodb/instances/%d", databaseID) - req := c.R(ctx).SetResult(&MongoDatabase{}) - r, err := coupleAPIErrors(req.Get(e)) - if err != nil { - return nil, err - } - - return r.Result().(*MongoDatabase), nil -} - -// CreateMongoDatabase creates a new Mongo Database using the createOpts as configuration, returns the new Mongo Database -func (c *Client) CreateMongoDatabase(ctx context.Context, opts MongoCreateOptions) (*MongoDatabase, error) { - body, err := json.Marshal(opts) - if err != nil { - return nil, err - } - - e := "databases/mongodb/instances" - req := c.R(ctx).SetResult(&MongoDatabase{}).SetBody(string(body)) - r, err := coupleAPIErrors(req.Post(e)) - if err != nil { - return nil, err - } - return r.Result().(*MongoDatabase), nil -} - -// DeleteMongoDatabase deletes an existing Mongo Database with the given id -func (c *Client) DeleteMongoDatabase(ctx context.Context, databaseID int) error { - e := fmt.Sprintf("databases/mongodb/instances/%d", databaseID) - _, err := coupleAPIErrors(c.R(ctx).Delete(e)) - return err -} - -// UpdateMongoDatabase updates the given Mongo Database with the provided opts, returns the MongoDatabase with the new settings -func (c *Client) UpdateMongoDatabase(ctx context.Context, databaseID int, opts MongoUpdateOptions) (*MongoDatabase, error) { - body, err := json.Marshal(opts) - if err != nil { - return nil, err - } - - e := fmt.Sprintf("databases/mongodb/instances/%d", databaseID) - req := c.R(ctx).SetResult(&MongoDatabase{}).SetBody(string(body)) - r, err := coupleAPIErrors(req.Put(e)) - if err != nil { - return nil, err - } - - return r.Result().(*MongoDatabase), nil -} - -// PatchMongoDatabase applies security patches and updates to the underlying operating system of the Managed Mongo Database -func (c *Client) PatchMongoDatabase(ctx context.Context, databaseID int) error { - e := fmt.Sprintf("databases/mongodb/instances/%d/patch", databaseID) - _, err := coupleAPIErrors(c.R(ctx).Post(e)) - return err -} - -// GetMongoDatabaseCredentials returns the Root Credentials for the given Mongo Database -func (c *Client) GetMongoDatabaseCredentials(ctx context.Context, databaseID int) (*MongoDatabaseCredential, error) { - e := fmt.Sprintf("databases/mongodb/instances/%d/credentials", databaseID) - req := c.R(ctx).SetResult(&MongoDatabaseCredential{}) - r, err := coupleAPIErrors(req.Get(e)) - if err != nil { - return nil, err - } - - return r.Result().(*MongoDatabaseCredential), nil -} - -// ResetMongoDatabaseCredentials returns the Root Credentials for the given Mongo Database (may take a few seconds to work) -func (c *Client) ResetMongoDatabaseCredentials(ctx context.Context, databaseID int) error { - e := fmt.Sprintf("databases/mongodb/instances/%d/credentials/reset", databaseID) - _, err := coupleAPIErrors(c.R(ctx).Post(e)) - return err -} - -// GetMongoDatabaseSSL returns the SSL Certificate for the 
given Mongo Database -func (c *Client) GetMongoDatabaseSSL(ctx context.Context, databaseID int) (*MongoDatabaseSSL, error) { - e := fmt.Sprintf("databases/mongodb/instances/%d/ssl", databaseID) - req := c.R(ctx).SetResult(&MongoDatabaseSSL{}) - r, err := coupleAPIErrors(req.Get(e)) - if err != nil { - return nil, err - } - - return r.Result().(*MongoDatabaseSSL), nil -} - -// GetMongoDatabaseBackup returns a specific Mongo Database Backup with the given ids -func (c *Client) GetMongoDatabaseBackup(ctx context.Context, databaseID int, backupID int) (*MongoDatabaseBackup, error) { - e := fmt.Sprintf("databases/mongodb/instances/%d/backups/%d", databaseID, backupID) - req := c.R(ctx).SetResult(&MongoDatabaseBackup{}) - r, err := coupleAPIErrors(req.Get(e)) - if err != nil { - return nil, err - } - - return r.Result().(*MongoDatabaseBackup), nil -} - -// RestoreMongoDatabaseBackup returns the given Mongo Database with the given Backup -func (c *Client) RestoreMongoDatabaseBackup(ctx context.Context, databaseID int, backupID int) error { - e := fmt.Sprintf("databases/mongodb/instances/%d/backups/%d/restore", databaseID, backupID) - _, err := coupleAPIErrors(c.R(ctx).Post(e)) - return err -} - -// CreateMongoDatabaseBackup creates a snapshot for the given Mongo database -func (c *Client) CreateMongoDatabaseBackup(ctx context.Context, databaseID int, opts MongoBackupCreateOptions) error { - body, err := json.Marshal(opts) - if err != nil { - return err - } - e := fmt.Sprintf("databases/mongodb/instances/%d/backups", databaseID) - _, err = coupleAPIErrors(c.R(ctx).SetBody(string(body)).Post(e)) - return err -} diff --git a/vendor/github.com/linode/linodego/network_ips.go b/vendor/github.com/linode/linodego/network_ips.go index c22f72da5..5b86883e9 100644 --- a/vendor/github.com/linode/linodego/network_ips.go +++ b/vendor/github.com/linode/linodego/network_ips.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -39,6 +40,12 @@ type IPAddressesShareOptions struct { LinodeID int `json:"linode_id"` } +// ListIPAddressesQuery fields are those accepted as query params for the +// ListIPAddresses function. 
+type ListIPAddressesQuery struct { + SkipIPv6RDNS bool `query:"skip_ipv6_rdns"` +} + // GetUpdateOptions converts a IPAddress to IPAddressUpdateOptions for use in UpdateIPAddress func (i InstanceIP) GetUpdateOptions() (o IPAddressUpdateOptions) { o.RDNS = copyString(&i.RDNS) @@ -72,6 +79,7 @@ func (c *Client) ListIPAddresses(ctx context.Context, opts *ListOptions) ([]Inst // GetIPAddress gets the template with the provided ID func (c *Client) GetIPAddress(ctx context.Context, id string) (*InstanceIP, error) { + id = url.PathEscape(id) e := fmt.Sprintf("networking/ips/%s", id) req := c.R(ctx).SetResult(&InstanceIP{}) r, err := coupleAPIErrors(req.Get(e)) @@ -88,6 +96,7 @@ func (c *Client) UpdateIPAddress(ctx context.Context, id string, opts IPAddressU return nil, err } + id = url.PathEscape(id) e := fmt.Sprintf("networking/ips/%s", id) req := c.R(ctx).SetResult(&InstanceIP{}).SetBody(string(body)) r, err := coupleAPIErrors(req.Put(e)) diff --git a/vendor/github.com/linode/linodego/network_pools.go b/vendor/github.com/linode/linodego/network_pools.go index 06c13c4bd..36f8b8f99 100644 --- a/vendor/github.com/linode/linodego/network_pools.go +++ b/vendor/github.com/linode/linodego/network_pools.go @@ -3,6 +3,7 @@ package linodego import ( "context" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -40,6 +41,7 @@ func (c *Client) ListIPv6Pools(ctx context.Context, opts *ListOptions) ([]IPv6Ra // GetIPv6Pool gets the template with the provided ID func (c *Client) GetIPv6Pool(ctx context.Context, id string) (*IPv6Range, error) { + id = url.PathEscape(id) e := fmt.Sprintf("networking/ipv6/pools/%s", id) req := c.R(ctx).SetResult(&IPv6Range{}) r, err := coupleAPIErrors(req.Get(e)) diff --git a/vendor/github.com/linode/linodego/network_ranges.go b/vendor/github.com/linode/linodego/network_ranges.go index 3caa714c7..6a0122372 100644 --- a/vendor/github.com/linode/linodego/network_ranges.go +++ b/vendor/github.com/linode/linodego/network_ranges.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -48,6 +49,7 @@ func (c *Client) ListIPv6Ranges(ctx context.Context, opts *ListOptions) ([]IPv6R // GetIPv6Range gets details about an IPv6 range func (c *Client) GetIPv6Range(ctx context.Context, ipRange string) (*IPv6Range, error) { + ipRange = url.PathEscape(ipRange) e := fmt.Sprintf("networking/ipv6/ranges/%s", ipRange) req := c.R(ctx).SetResult(&IPv6Range{}) r, err := coupleAPIErrors(req.Get(e)) @@ -75,6 +77,7 @@ func (c *Client) CreateIPv6Range(ctx context.Context, opts IPv6RangeCreateOption // DeleteIPv6Range deletes an IPv6 Range. 
func (c *Client) DeleteIPv6Range(ctx context.Context, ipRange string) error { + ipRange = url.PathEscape(ipRange) e := fmt.Sprintf("networking/ipv6/ranges/%s", ipRange) _, err := coupleAPIErrors(c.R(ctx).Delete(e)) return err diff --git a/vendor/github.com/linode/linodego/object_storage_bucket_certs.go b/vendor/github.com/linode/linodego/object_storage_bucket_certs.go index e8cca26f5..904f2dcda 100644 --- a/vendor/github.com/linode/linodego/object_storage_bucket_certs.go +++ b/vendor/github.com/linode/linodego/object_storage_bucket_certs.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" ) type ObjectStorageBucketCert struct { @@ -22,6 +23,8 @@ func (c *Client) UploadObjectStorageBucketCert(ctx context.Context, clusterID, b return nil, err } + clusterID = url.PathEscape(clusterID) + bucket = url.PathEscape(bucket) e := fmt.Sprintf("object-storage/buckets/%s/%s/ssl", clusterID, bucket) req := c.R(ctx).SetResult(&ObjectStorageBucketCert{}).SetBody(string(body)) r, err := coupleAPIErrors(req.Post(e)) @@ -33,6 +36,8 @@ func (c *Client) UploadObjectStorageBucketCert(ctx context.Context, clusterID, b // GetObjectStorageBucketCert gets an ObjectStorageBucketCert func (c *Client) GetObjectStorageBucketCert(ctx context.Context, clusterID, bucket string) (*ObjectStorageBucketCert, error) { + clusterID = url.PathEscape(clusterID) + bucket = url.PathEscape(bucket) e := fmt.Sprintf("object-storage/buckets/%s/%s/ssl", clusterID, bucket) req := c.R(ctx).SetResult(&ObjectStorageBucketCert{}) r, err := coupleAPIErrors(req.Get(e)) @@ -44,6 +49,8 @@ func (c *Client) GetObjectStorageBucketCert(ctx context.Context, clusterID, buck // DeleteObjectStorageBucketCert deletes an ObjectStorageBucketCert func (c *Client) DeleteObjectStorageBucketCert(ctx context.Context, clusterID, bucket string) error { + clusterID = url.PathEscape(clusterID) + bucket = url.PathEscape(bucket) e := fmt.Sprintf("object-storage/buckets/%s/%s/ssl", clusterID, bucket) _, err := coupleAPIErrors(c.R(ctx).Delete(e)) return err diff --git a/vendor/github.com/linode/linodego/object_storage_buckets.go b/vendor/github.com/linode/linodego/object_storage_buckets.go index 9567eac66..060ec985d 100644 --- a/vendor/github.com/linode/linodego/object_storage_buckets.go +++ b/vendor/github.com/linode/linodego/object_storage_buckets.go @@ -84,7 +84,7 @@ type ObjectStorageBucketsPagedResponse struct { func (ObjectStorageBucketsPagedResponse) endpoint(args ...any) string { endpoint := "object-storage/buckets" if len(args) > 0 { - endpoint = fmt.Sprintf(endpoint+"/%s", args[0]) + endpoint = fmt.Sprintf(endpoint+"/%s", url.PathEscape(args[0].(string))) } return endpoint } @@ -122,6 +122,7 @@ func (c *Client) ListObjectStorageBucketsInCluster(ctx context.Context, opts *Li // GetObjectStorageBucket gets the ObjectStorageBucket with the provided label func (c *Client) GetObjectStorageBucket(ctx context.Context, clusterID, label string) (*ObjectStorageBucket, error) { label = url.PathEscape(label) + clusterID = url.PathEscape(clusterID) e := fmt.Sprintf("object-storage/buckets/%s/%s", clusterID, label) req := c.R(ctx).SetResult(&ObjectStorageBucket{}) r, err := coupleAPIErrors(req.Get(e)) @@ -150,6 +151,7 @@ func (c *Client) CreateObjectStorageBucket(ctx context.Context, opts ObjectStora // GetObjectStorageBucketAccess gets the current access config for a bucket func (c *Client) GetObjectStorageBucketAccess(ctx context.Context, clusterID, label string) (*ObjectStorageBucketAccess, error) { label = url.PathEscape(label) + clusterID = 
url.PathEscape(clusterID) e := fmt.Sprintf("object-storage/buckets/%s/%s/access", clusterID, label) req := c.R(ctx).SetResult(&ObjectStorageBucketAccess{}) r, err := coupleAPIErrors(req.Get(e)) @@ -168,6 +170,7 @@ func (c *Client) UpdateObjectStorageBucketAccess(ctx context.Context, clusterID, } label = url.PathEscape(label) + clusterID = url.PathEscape(clusterID) e := fmt.Sprintf("object-storage/buckets/%s/%s/access", clusterID, label) _, err = coupleAPIErrors(c.R(ctx).SetBody(string(body)).Post(e)) if err != nil { diff --git a/vendor/github.com/linode/linodego/object_storage_clusters.go b/vendor/github.com/linode/linodego/object_storage_clusters.go index c5fbeb587..4e4e3e267 100644 --- a/vendor/github.com/linode/linodego/object_storage_clusters.go +++ b/vendor/github.com/linode/linodego/object_storage_clusters.go @@ -3,6 +3,7 @@ package linodego import ( "context" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -49,6 +50,7 @@ func (c *Client) ListObjectStorageClusters(ctx context.Context, opts *ListOption // GetObjectStorageCluster gets the template with the provided ID func (c *Client) GetObjectStorageCluster(ctx context.Context, clusterID string) (*ObjectStorageCluster, error) { + clusterID = url.PathEscape(clusterID) e := fmt.Sprintf("object-storage/clusters/%s", clusterID) req := c.R(ctx).SetResult(&ObjectStorageCluster{}) r, err := coupleAPIErrors(req.Get(e)) diff --git a/vendor/github.com/linode/linodego/object_storage_object.go b/vendor/github.com/linode/linodego/object_storage_object.go index f18f03688..4d262f40e 100644 --- a/vendor/github.com/linode/linodego/object_storage_object.go +++ b/vendor/github.com/linode/linodego/object_storage_object.go @@ -37,6 +37,7 @@ func (c *Client) CreateObjectStorageObjectURL(ctx context.Context, objectID, lab } label = url.PathEscape(label) + objectID = url.PathEscape(objectID) e := fmt.Sprintf("object-storage/buckets/%s/%s/object-url", objectID, label) req := c.R(ctx).SetResult(&ObjectStorageObjectURL{}).SetBody(string(body)) r, err := coupleAPIErrors(req.Post(e)) @@ -45,6 +46,7 @@ func (c *Client) CreateObjectStorageObjectURL(ctx context.Context, objectID, lab func (c *Client) GetObjectStorageObjectACLConfig(ctx context.Context, objectID, label, object string) (*ObjectStorageObjectACLConfig, error) { label = url.PathEscape(label) + object = url.QueryEscape(object) e := fmt.Sprintf("object-storage/buckets/%s/%s/object-acl?name=%s", objectID, label, object) req := c.R(ctx).SetResult(&ObjectStorageObjectACLConfig{}) r, err := coupleAPIErrors(req.Get(e)) diff --git a/vendor/github.com/linode/linodego/pagination.go b/vendor/github.com/linode/linodego/pagination.go index 2de2cfe32..0d5dff6bc 100644 --- a/vendor/github.com/linode/linodego/pagination.go +++ b/vendor/github.com/linode/linodego/pagination.go @@ -9,6 +9,7 @@ import ( "crypto/sha256" "encoding/json" "fmt" + "reflect" "strconv" "github.com/go-resty/resty/v2" @@ -16,16 +17,22 @@ import ( // PageOptions are the pagination parameters for List endpoints type PageOptions struct { - Page int `url:"page,omitempty" json:"page"` - Pages int `url:"pages,omitempty" json:"pages"` - Results int `url:"results,omitempty" json:"results"` + Page int `json:"page" url:"page,omitempty"` + Pages int `json:"pages" url:"pages,omitempty"` + Results int `json:"results" url:"results,omitempty"` } // ListOptions are the pagination and filtering (TODO) parameters for endpoints +//nolint type ListOptions struct { *PageOptions - PageSize int - Filter string + PageSize int `json:"page_size"` + Filter string 
`json:"filter"` + + // QueryParams allows for specifying custom query parameters on list endpoint + // calls. QueryParams should be an instance of a struct containing fields with + // the `query` tag. + QueryParams any } // NewListOptions simplified construction of ListOptions using only @@ -49,20 +56,33 @@ func (l ListOptions) Hash() (string, error) { return fmt.Sprintf("%x", h.Sum(nil)), nil } -func applyListOptionsToRequest(opts *ListOptions, req *resty.Request) { - if opts != nil { - if opts.PageOptions != nil && opts.Page > 0 { - req.SetQueryParam("page", strconv.Itoa(opts.Page)) - } +func applyListOptionsToRequest(opts *ListOptions, req *resty.Request) error { + if opts == nil { + return nil + } - if opts.PageSize > 0 { - req.SetQueryParam("page_size", strconv.Itoa(opts.PageSize)) + if opts.QueryParams != nil { + params, err := flattenQueryStruct(opts.QueryParams) + if err != nil { + return fmt.Errorf("failed to apply list options: %w", err) } - if len(opts.Filter) > 0 { - req.SetHeader("X-Filter", opts.Filter) - } + req.SetQueryParams(params) + } + + if opts.PageOptions != nil && opts.Page > 0 { + req.SetQueryParam("page", strconv.Itoa(opts.Page)) + } + + if opts.PageSize > 0 { + req.SetQueryParam("page_size", strconv.Itoa(opts.PageSize)) + } + + if len(opts.Filter) > 0 { + req.SetHeader("X-Filter", opts.Filter) } + + return nil } type PagedResponse interface { @@ -77,7 +97,9 @@ type PagedResponse interface { // opts.results and opts.pages will be updated from the API response func (c *Client) listHelper(ctx context.Context, pager PagedResponse, opts *ListOptions, ids ...any) error { req := c.R(ctx) - applyListOptionsToRequest(opts, req) + if err := applyListOptionsToRequest(opts, req); err != nil { + return err + } pages, results, err := pager.castResult(req, pager.endpoint(ids...)) if err != nil { @@ -102,3 +124,75 @@ func (c *Client) listHelper(ctx context.Context, pager PagedResponse, opts *List opts.Pages = pages return nil } + +// flattenQueryStruct flattens a structure into a Resty-compatible query param map. +// Fields are mapped using the `query` struct tag. 
+func flattenQueryStruct(val any) (map[string]string, error) { + result := make(map[string]string) + + reflectVal := reflect.ValueOf(val) + + // Deref pointer if necessary + if reflectVal.Kind() == reflect.Pointer { + if reflectVal.IsNil() { + return nil, fmt.Errorf("QueryParams is a nil pointer") + } + reflectVal = reflect.Indirect(reflectVal) + } + + if reflectVal.Kind() != reflect.Struct { + return nil, fmt.Errorf( + "expected struct type for the QueryParams but got: %s", + reflectVal.Kind().String(), + ) + } + + valType := reflectVal.Type() + + for i := 0; i < valType.NumField(); i++ { + currentField := valType.Field(i) + + queryTag, ok := currentField.Tag.Lookup("query") + // Skip untagged fields + if !ok { + continue + } + + valField := reflectVal.FieldByName(currentField.Name) + if !valField.IsValid() { + return nil, fmt.Errorf("invalid query param tag: %s", currentField.Name) + } + + // Skip if it's a zero value + if valField.IsZero() { + continue + } + + // Deref the pointer is necessary + if valField.Kind() == reflect.Pointer { + valField = reflect.Indirect(valField) + } + + fieldString, err := queryFieldToString(valField) + if err != nil { + return nil, err + } + + result[queryTag] = fieldString + } + + return result, nil +} + +func queryFieldToString(value reflect.Value) (string, error) { + switch value.Kind() { + case reflect.String: + return value.String(), nil + case reflect.Int64, reflect.Int32, reflect.Int: + return strconv.FormatInt(value.Int(), 10), nil + case reflect.Bool: + return strconv.FormatBool(value.Bool()), nil + default: + return "", fmt.Errorf("unsupported query param type: %s", value.Type().Name()) + } +} diff --git a/vendor/github.com/linode/linodego/regions.go b/vendor/github.com/linode/linodego/regions.go index 24970dd4f..f4210383e 100644 --- a/vendor/github.com/linode/linodego/regions.go +++ b/vendor/github.com/linode/linodego/regions.go @@ -3,6 +3,7 @@ package linodego import ( "context" "fmt" + "net/url" "time" "github.com/go-resty/resty/v2" @@ -74,7 +75,7 @@ func (c *Client) ListRegions(ctx context.Context, opts *ListOptions) ([]Region, // GetRegion gets the template with the provided ID. This endpoint is cached by default. func (c *Client) GetRegion(ctx context.Context, regionID string) (*Region, error) { - e := fmt.Sprintf("regions/%s", regionID) + e := fmt.Sprintf("regions/%s", url.PathEscape(regionID)) if result := c.getCachedResponse(e); result != nil { result := result.(Region) diff --git a/vendor/github.com/linode/linodego/tags.go b/vendor/github.com/linode/linodego/tags.go index 396708e2f..f22208ec4 100644 --- a/vendor/github.com/linode/linodego/tags.go +++ b/vendor/github.com/linode/linodego/tags.go @@ -83,7 +83,7 @@ type TaggedObjectsPagedResponse struct { // endpoint gets the endpoint URL for Tag func (TaggedObjectsPagedResponse) endpoint(ids ...any) string { - id := ids[0].(string) + id := url.PathEscape(ids[0].(string)) return fmt.Sprintf("tags/%s", id) } diff --git a/vendor/github.com/linode/linodego/types.go b/vendor/github.com/linode/linodego/types.go index ee6e4c8ca..f828b0ffb 100644 --- a/vendor/github.com/linode/linodego/types.go +++ b/vendor/github.com/linode/linodego/types.go @@ -3,6 +3,7 @@ package linodego import ( "context" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -93,7 +94,7 @@ func (c *Client) ListTypes(ctx context.Context, opts *ListOptions) ([]LinodeType // GetType gets the type with the provided ID. This endpoint is cached by default. 
func (c *Client) GetType(ctx context.Context, typeID string) (*LinodeType, error) { - e := fmt.Sprintf("linode/types/%s", typeID) + e := fmt.Sprintf("linode/types/%s", url.PathEscape(typeID)) if result := c.getCachedResponse(e); result != nil { result := result.(LinodeType) diff --git a/vendor/github.com/linode/linodego/waitfor.go b/vendor/github.com/linode/linodego/waitfor.go index 60fbf9ec3..91f197658 100644 --- a/vendor/github.com/linode/linodego/waitfor.go +++ b/vendor/github.com/linode/linodego/waitfor.go @@ -25,7 +25,7 @@ func (client Client) WaitForInstanceStatus(ctx context.Context, instanceID int, ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -52,7 +52,7 @@ func (client Client) WaitForInstanceDiskStatus(ctx context.Context, instanceID i ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -88,7 +88,7 @@ func (client Client) WaitForVolumeStatus(ctx context.Context, volumeID int, stat ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -115,7 +115,7 @@ func (client Client) WaitForSnapshotStatus(ctx context.Context, instanceID int, ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -144,7 +144,7 @@ func (client Client) WaitForVolumeLinodeID(ctx context.Context, volumeID int, li ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -175,7 +175,7 @@ func (client Client) WaitForLKEClusterStatus(ctx context.Context, clusterID int, ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -237,7 +237,7 @@ func (client Client) WaitForLKEClusterConditions( return fmt.Errorf("failed to get Kubeconfig for LKE cluster %d: %w", clusterID, err) } - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() conditionOptions := ClusterConditionOptions{LKEClusterKubeconfig: lkeKubeConfig, TransportWrapper: options.TransportWrapper} @@ -270,8 +270,15 @@ func (client Client) WaitForLKEClusterConditions( // WaitForEventFinished waits for an entity action to reach the 'finished' state // before returning. It will timeout with an error after timeoutSeconds. // If the event indicates a failure both the failed event and the error will be returned. 
-// nolint -func (client Client) WaitForEventFinished(ctx context.Context, id any, entityType EntityType, action EventAction, minStart time.Time, timeoutSeconds int) (*Event, error) { +//nolint +func (client Client) WaitForEventFinished( + ctx context.Context, + id any, + entityType EntityType, + action EventAction, + minStart time.Time, + timeoutSeconds int, +) (*Event, error) { titledEntityType := strings.Title(string(entityType)) filter := Filter{ Order: Descending, @@ -291,12 +298,11 @@ func (client Client) WaitForEventFinished(ctx context.Context, id any, entityTyp // All of the filter supported types have int ids filterableEntityID, err := strconv.Atoi(fmt.Sprintf("%v", id)) if err != nil { - return nil, fmt.Errorf("Error parsing Entity ID %q for optimized WaitForEventFinished EventType %q: %w", id, entityType, err) + return nil, fmt.Errorf("error parsing Entity ID %q for optimized "+ + "WaitForEventFinished EventType %q: %w", id, entityType, err) } filter.AddField(Eq, "entity.id", filterableEntityID) filter.AddField(Eq, "entity.type", entityType) - - // TODO: are we conformatable with pages = 0 with the event type and id filter? } ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) @@ -307,7 +313,7 @@ func (client Client) WaitForEventFinished(ctx context.Context, id any, entityTyp log.Printf("[INFO] Waiting %d seconds for %s events since %v for %s %v", int(duration.Seconds()), action, minStart, titledEntityType, id) } - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) // avoid repeating log messages nextLog := "" @@ -369,8 +375,6 @@ func (client Client) WaitForEventFinished(ctx context.Context, id any, entityTyp continue } - // @TODO(displague) This event.Created check shouldn't be needed, but it appears - // that the ListEvents method is not populating it correctly if event.Created == nil { log.Printf("[WARN] event.Created is nil when API returned: %#+v", event.Created) } @@ -387,7 +391,7 @@ func (client Client) WaitForEventFinished(ctx context.Context, id any, entityTyp log.Printf("[INFO] %s %v action %s is finished", titledEntityType, id, action) return &event, nil } - // TODO(displague) can we bump the ticker to TimeRemaining/2 (>=1) when non-nil? + nextLog = fmt.Sprintf("[INFO] %s %v action %s is %s", titledEntityType, id, action, event.Status) } @@ -408,7 +412,7 @@ func (client Client) WaitForImageStatus(ctx context.Context, imageID string, sta ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -434,7 +438,7 @@ func (client Client) WaitForMySQLDatabaseBackup(ctx context.Context, dbID int, l ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -456,39 +460,12 @@ func (client Client) WaitForMySQLDatabaseBackup(ctx context.Context, dbID int, l } } -// WaitForMongoDatabaseBackup waits for the backup with the given label to be available. 
-func (client Client) WaitForMongoDatabaseBackup(ctx context.Context, dbID int, label string, timeoutSeconds int) (*MongoDatabaseBackup, error) { - ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) - defer cancel() - - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - backups, err := client.ListMongoDatabaseBackups(ctx, dbID, nil) - if err != nil { - return nil, err - } - - for _, backup := range backups { - if backup.Label == label { - return &backup, nil - } - } - case <-ctx.Done(): - return nil, fmt.Errorf("failed to wait for backup %s: %w", label, ctx.Err()) - } - } -} - // WaitForPostgresDatabaseBackup waits for the backup with the given label to be available. func (client Client) WaitForPostgresDatabaseBackup(ctx context.Context, dbID int, label string, timeoutSeconds int) (*PostgresDatabaseBackup, error) { ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -521,14 +498,6 @@ var databaseStatusHandlers = map[DatabaseEngineType]databaseStatusFunc{ return db.Status, nil }, - DatabaseEngineTypeMongo: func(ctx context.Context, client Client, dbID int) (DatabaseStatus, error) { - db, err := client.GetMongoDatabase(ctx, dbID) - if err != nil { - return "", err - } - - return db.Status, nil - }, DatabaseEngineTypePostgres: func(ctx context.Context, client Client, dbID int) (DatabaseStatus, error) { db, err := client.GetPostgresDatabase(ctx, dbID) if err != nil { @@ -546,7 +515,7 @@ func (client Client) WaitForDatabaseStatus( ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(client.pollInterval) defer ticker.Stop() for { @@ -646,7 +615,7 @@ func (p *EventPoller) PreTask(ctx context.Context) error { } func (p *EventPoller) WaitForLatestUnknownEvent(ctx context.Context) (*Event, error) { - ticker := time.NewTicker(p.client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(p.client.pollInterval) defer ticker.Stop() f := Filter{ @@ -697,7 +666,7 @@ func (p *EventPoller) WaitForFinished( ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) defer cancel() - ticker := time.NewTicker(p.client.millisecondsPerPoll * time.Millisecond) + ticker := time.NewTicker(p.client.pollInterval) defer ticker.Stop() event, err := p.WaitForLatestUnknownEvent(ctx) @@ -726,3 +695,56 @@ func (p *EventPoller) WaitForFinished( } } } + +// WaitForResourceFree waits for a resource to have no running events. 
+func (client Client) WaitForResourceFree( + ctx context.Context, entityType EntityType, entityID any, timeoutSeconds int, +) error { + apiFilter := Filter{ + Order: Descending, + OrderBy: "created", + } + apiFilter.AddField(Eq, "entity.id", entityID) + apiFilter.AddField(Eq, "entity.type", entityType) + + filterStr, err := apiFilter.MarshalJSON() + if err != nil { + return fmt.Errorf("failed to create filter: %s", err) + } + + ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) + defer cancel() + + ticker := time.NewTicker(client.pollInterval) + defer ticker.Stop() + + // A helper function to determine whether a resource is busy + checkIsBusy := func(events []Event) bool { + for _, event := range events { + if event.Status == EventStarted || event.Status == EventScheduled { + return true + } + } + + return false + } + + for { + select { + case <-ticker.C: + events, err := client.ListEvents(ctx, &ListOptions{ + Filter: string(filterStr), + }) + if err != nil { + return fmt.Errorf("failed to list events: %s", err) + } + + if !checkIsBusy(events) { + return nil + } + + case <-ctx.Done(): + return fmt.Errorf("failed to wait for resource free: %s", ctx.Err()) + } + } +} diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index 9051ae007..9549fa923 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -6,7 +6,6 @@ import ( "context" "crypto/tls" "encoding/binary" - "fmt" "io" "net" "strings" @@ -56,14 +55,20 @@ type Client struct { // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, // WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and // Client.Dialer) or context.Context.Deadline (see ExchangeContext) - Timeout time.Duration - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) - TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations. - SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass - group singleflight + Timeout time.Duration + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + TsigProvider TsigProvider // An implementation of the TsigProvider interface. 
If defined it replaces TsigSecret and is used for all TSIG operations. + + // SingleInflight previously serialised multiple concurrent queries for the + // same Qname, Qtype and Qclass to ensure only one would be in flight at a + // time. + // + // Deprecated: This is a no-op. Callers should implement their own in flight + // query caching if needed. See github.com/miekg/dns/issues/1449. + SingleInflight bool } // Exchange performs a synchronous UDP query. It sends the message m to the address @@ -178,33 +183,13 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er // This allows users of the library to implement their own connection management, // as opposed to Exchange, which will always use new connections and incur the added overhead // that entails when using "tcp" and especially "tcp-tls" clients. -// -// When the singleflight is set for this client the context is _not_ forwarded to the (shared) exchange, to -// prevent one cancellation from canceling all outstanding requests. func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) { - return c.exchangeWithConnContext(context.Background(), m, conn) + return c.ExchangeWithConnContext(context.Background(), m, conn) } -func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight { - return c.exchangeContext(ctx, m, conn) - } - - q := m.Question[0] - key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass) - r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) { - // When we're doing singleflight we don't want one context cancellation, cancel _all_ outstanding queries. - // Hence we ignore the context and use Background(). - return c.exchangeContext(context.Background(), m, conn) - }) - if r != nil && shared { - r = r.Copy() - } - - return r, rtt, err -} - -func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) { +// ExchangeWithConnContext has the same behaviour as ExchangeWithConn and +// additionally obeys deadlines from the passed Context. +func (c *Client) ExchangeWithConnContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) { opt := m.IsEdns0() // If EDNS0 is used use that for size. if opt != nil && opt.UDPSize() >= MinMsgSize { @@ -474,5 +459,5 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, } defer conn.Close() - return c.exchangeWithConnContext(ctx, m, conn) + return c.ExchangeWithConnContext(ctx, m, conn) } diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index 75b17f0c1..c1558b79c 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -272,18 +272,24 @@ func IsMsg(buf []byte) error { // IsFqdn checks if a domain name is fully qualified. func IsFqdn(s string) bool { - s2 := strings.TrimSuffix(s, ".") - if s == s2 { + // Check for (and remove) a trailing dot, returning if there isn't one. + if s == "" || s[len(s)-1] != '.' { return false } + s = s[:len(s)-1] - i := strings.LastIndexFunc(s2, func(r rune) bool { + // If we don't have an escape sequence before the final dot, we know it's + // fully qualified and can return here. + if s == "" || s[len(s)-1] != '\\' { + return true + } + + // Otherwise we have to check if the dot is escaped or not by checking if + // there are an odd or even number of escape sequences before the dot. 
+ i := strings.LastIndexFunc(s, func(r rune) bool { return r != '\\' }) - - // Test whether we have an even number of escape sequences before - // the dot or none. - return (len(s2)-i)%2 != 0 + return (len(s)-i)%2 != 0 } // IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index 57be98827..3083c3e5f 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -10,13 +10,13 @@ import ( "strings" ) -const maxTok = 2048 // Largest token we can return. +const maxTok = 512 // Token buffer start size, and growth size amount. // The maximum depth of $INCLUDE directives supported by the // ZoneParser API. const maxIncludeDepth = 7 -// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: +// Tokenize a RFC 1035 zone file. The tokenizer will normalize it: // * Add ownernames if they are left blank; // * Suppress sequences of spaces; // * Make each RR fit on one line (_NEWLINE is send as last) @@ -765,8 +765,8 @@ func (zl *zlexer) Next() (lex, bool) { } var ( - str [maxTok]byte // Hold string text - com [maxTok]byte // Hold comment text + str = make([]byte, maxTok) // Hold string text + com = make([]byte, maxTok) // Hold comment text stri int // Offset in str (0 means empty) comi int // Offset in com (0 means empty) @@ -785,14 +785,12 @@ func (zl *zlexer) Next() (lex, bool) { l.line, l.column = zl.line, zl.column if stri >= len(str) { - l.token = "token length insufficient for parsing" - l.err = true - return *l, true + // if buffer length is insufficient, increase it. + str = append(str[:], make([]byte, maxTok)...) } if comi >= len(com) { - l.token = "comment length insufficient for parsing" - l.err = true - return *l, true + // if buffer length is insufficient, increase it. + com = append(com[:], make([]byte, maxTok)...) } switch x { @@ -816,7 +814,7 @@ func (zl *zlexer) Next() (lex, bool) { if stri == 0 { // Space directly in the beginning, handled in the grammar } else if zl.owner { - // If we have a string and its the first, make it an owner + // If we have a string and it's the first, make it an owner l.value = zOwner l.token = string(str[:stri]) diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 2d44a3987..d08c8e6a7 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -904,11 +904,18 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { + if l.err { return &ParseError{"", "bad RRSIG Algorithm", l} } - rr.Algorithm = uint8(i) + i, e := strconv.ParseUint(l.token, 10, 8) + rr.Algorithm = uint8(i) // if 0 we'll check the mnemonic in the if + if e != nil { + v, ok := StringToAlgorithm[l.token] + if !ok { + return &ParseError{"", "bad RRSIG Algorithm", l} + } + rr.Algorithm = v + } c.Next() // zBlank l, _ = c.Next() diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go deleted file mode 100644 index febcc300f..000000000 --- a/vendor/github.com/miekg/dns/singleinflight.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted for dns package usage by Miek Gieben. 
- -package dns - -import "sync" -import "time" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - val *Msg - rtt time.Duration - err error - dups int -} - -// singleflight represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type singleflight struct { - sync.Mutex // protects m - m map[string]*call // lazily initialized - - dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { - g.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.Unlock() - c.wg.Wait() - return c.val, c.rtt, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.Unlock() - - c.val, c.rtt, c.err = fn() - c.wg.Done() - - if !g.dontDeleteForTesting { - g.Lock() - delete(g.m, key) - g.Unlock() - } - - return c.val, c.rtt, c.err, c.dups > 0 -} diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index f03a169c2..5891044a3 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 53} +var Version = v{1, 1, 55} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/pkg/browser/LICENSE b/vendor/github.com/pkg/browser/LICENSE new file mode 100644 index 000000000..65f78fb62 --- /dev/null +++ b/vendor/github.com/pkg/browser/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2014, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/browser/README.md b/vendor/github.com/pkg/browser/README.md new file mode 100644 index 000000000..72b1976e3 --- /dev/null +++ b/vendor/github.com/pkg/browser/README.md @@ -0,0 +1,55 @@ + +# browser + import "github.com/pkg/browser" + +Package browser provides helpers to open files, readers, and urls in a browser window. + +The choice of which browser is started is entirely client dependant. + + + + + +## Variables +``` go +var Stderr io.Writer = os.Stderr +``` +Stderr is the io.Writer to which executed commands write standard error. + +``` go +var Stdout io.Writer = os.Stdout +``` +Stdout is the io.Writer to which executed commands write standard output. + + +## func OpenFile +``` go +func OpenFile(path string) error +``` +OpenFile opens new browser window for the file path. + + +## func OpenReader +``` go +func OpenReader(r io.Reader) error +``` +OpenReader consumes the contents of r and presents the +results in a new browser window. + + +## func OpenURL +``` go +func OpenURL(url string) error +``` +OpenURL opens a new browser window pointing to url. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/github.com/pkg/browser/browser.go b/vendor/github.com/pkg/browser/browser.go new file mode 100644 index 000000000..d7969d74d --- /dev/null +++ b/vendor/github.com/pkg/browser/browser.go @@ -0,0 +1,57 @@ +// Package browser provides helpers to open files, readers, and urls in a browser window. +// +// The choice of which browser is started is entirely client dependant. +package browser + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" +) + +// Stdout is the io.Writer to which executed commands write standard output. +var Stdout io.Writer = os.Stdout + +// Stderr is the io.Writer to which executed commands write standard error. +var Stderr io.Writer = os.Stderr + +// OpenFile opens new browser window for the file path. +func OpenFile(path string) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + return OpenURL("file://" + path) +} + +// OpenReader consumes the contents of r and presents the +// results in a new browser window. +func OpenReader(r io.Reader) error { + f, err := ioutil.TempFile("", "browser.*.html") + if err != nil { + return fmt.Errorf("browser: could not create temporary file: %v", err) + } + if _, err := io.Copy(f, r); err != nil { + f.Close() + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + if err := f.Close(); err != nil { + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + return OpenFile(f.Name()) +} + +// OpenURL opens a new browser window pointing to url. +func OpenURL(url string) error { + return openBrowser(url) +} + +func runCmd(prog string, args ...string) error { + cmd := exec.Command(prog, args...) 
+ cmd.Stdout = Stdout + cmd.Stderr = Stderr + return cmd.Run() +} diff --git a/vendor/github.com/pkg/browser/browser_darwin.go b/vendor/github.com/pkg/browser/browser_darwin.go new file mode 100644 index 000000000..8507cf7c2 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_darwin.go @@ -0,0 +1,5 @@ +package browser + +func openBrowser(url string) error { + return runCmd("open", url) +} diff --git a/vendor/github.com/pkg/browser/browser_freebsd.go b/vendor/github.com/pkg/browser/browser_freebsd.go new file mode 100644 index 000000000..4fc7ff076 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_freebsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_linux.go b/vendor/github.com/pkg/browser/browser_linux.go new file mode 100644 index 000000000..d26cdddf9 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_linux.go @@ -0,0 +1,21 @@ +package browser + +import ( + "os/exec" + "strings" +) + +func openBrowser(url string) error { + providers := []string{"xdg-open", "x-www-browser", "www-browser"} + + // There are multiple possible providers to open a browser on linux + // One of them is xdg-open, another is x-www-browser, then there's www-browser, etc. + // Look for one that exists and run it + for _, provider := range providers { + if _, err := exec.LookPath(provider); err == nil { + return runCmd(provider, url) + } + } + + return &exec.Error{Name: strings.Join(providers, ","), Err: exec.ErrNotFound} +} diff --git a/vendor/github.com/pkg/browser/browser_netbsd.go b/vendor/github.com/pkg/browser/browser_netbsd.go new file mode 100644 index 000000000..65a5e5a29 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_netbsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from pkgsrc(7)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go b/vendor/github.com/pkg/browser/browser_openbsd.go new file mode 100644 index 000000000..4fc7ff076 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_openbsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_unsupported.go b/vendor/github.com/pkg/browser/browser_unsupported.go new file mode 100644 index 000000000..7c5c17d34 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!darwin,!openbsd,!freebsd,!netbsd + +package browser + +import ( + "fmt" + "runtime" +) + +func openBrowser(url string) error { + return fmt.Errorf("openBrowser: unsupported operating system: %v", runtime.GOOS) +} diff --git a/vendor/github.com/pkg/browser/browser_windows.go b/vendor/github.com/pkg/browser/browser_windows.go new file mode 100644 index 000000000..63e192959 --- /dev/null +++ 
b/vendor/github.com/pkg/browser/browser_windows.go @@ -0,0 +1,7 @@ +package browser + +import "golang.org/x/sys/windows" + +func openBrowser(url string) error { + return windows.ShellExecute(0, nil, windows.StringToUTF16Ptr(url), nil, nil, windows.SW_SHOWNORMAL) +} diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 5c51d5a0d..7f7595dcd 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -34,6 +34,7 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/storage/remote/azuread" ) var ( @@ -146,13 +147,14 @@ var ( // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ - // ScrapeTimeout and ScrapeInterval default to the - // configured globals. - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, + // ScrapeTimeout and ScrapeInterval default to the configured + // globals. + ScrapeClassicHistograms: false, + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -266,7 +268,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { for i, scfg := range c.ScrapeConfigs { // We do these checks for library users that would not call Validate in // Unmarshal. - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, err } @@ -293,7 +295,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { return nil, fileErr(filename, err) } for _, scfg := range cfg.ScrapeConfigs { - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return nil, fileErr(filename, err) } @@ -342,7 +344,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { // Do global overrides and validate unique names. jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { - if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + if err := scfg.Validate(c.GlobalConfig); err != nil { return err } @@ -389,6 +391,27 @@ type GlobalConfig struct { QueryLogFile string `yaml:"query_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` + // An uncompressed response body larger than this many bytes will cause the + // scrape to fail. 0 means no limit. + BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` + // More than this many samples post metric-relabeling will cause the scrape to + // fail. 0 means no limit. + SampleLimit uint `yaml:"sample_limit,omitempty"` + // More than this many targets after the target relabeling will cause the + // scrapes to fail. 0 means no limit. + TargetLimit uint `yaml:"target_limit,omitempty"` + // More than this many labels post metric-relabeling will cause the scrape to + // fail. 0 means no limit. 
+ LabelLimit uint `yaml:"label_limit,omitempty"` + // More than this label name length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` + // More than this label value length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` + // Keep no more than this many dropped targets per job. + // 0 means no limit. + KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -467,6 +490,8 @@ type ScrapeConfig struct { ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` // The timeout for scraping targets of this config. ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // Whether to scrape a classic histogram that is also exposed as a native histogram. + ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. @@ -475,20 +500,26 @@ type ScrapeConfig struct { // scrape to fail. 0 means no limit. BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` // More than this many samples post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. SampleLimit uint `yaml:"sample_limit,omitempty"` // More than this many targets after the target relabeling will cause the - // scrapes to fail. + // scrapes to fail. 0 means no limit. TargetLimit uint `yaml:"target_limit,omitempty"` // More than this many labels post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. LabelLimit uint `yaml:"label_limit,omitempty"` // More than this label name length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` // More than this label value length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` + // More than this many buckets in a native histogram will cause the scrape to + // fail. + NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"` + // Keep no more than this many dropped targets per job. + // 0 means no limit. + KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -546,25 +577,47 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error { +func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c == nil { return errors.New("empty or null scrape config section") } // First set the correct scrape interval, then check that the timeout // (inferred or explicit) is not greater than that. 
if c.ScrapeInterval == 0 { - c.ScrapeInterval = defaultInterval + c.ScrapeInterval = globalConfig.ScrapeInterval } if c.ScrapeTimeout > c.ScrapeInterval { return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName) } if c.ScrapeTimeout == 0 { - if defaultTimeout > c.ScrapeInterval { + if globalConfig.ScrapeTimeout > c.ScrapeInterval { c.ScrapeTimeout = c.ScrapeInterval } else { - c.ScrapeTimeout = defaultTimeout + c.ScrapeTimeout = globalConfig.ScrapeTimeout } } + if c.BodySizeLimit == 0 { + c.BodySizeLimit = globalConfig.BodySizeLimit + } + if c.SampleLimit == 0 { + c.SampleLimit = globalConfig.SampleLimit + } + if c.TargetLimit == 0 { + c.TargetLimit = globalConfig.TargetLimit + } + if c.LabelLimit == 0 { + c.LabelLimit = globalConfig.LabelLimit + } + if c.LabelNameLengthLimit == 0 { + c.LabelNameLengthLimit = globalConfig.LabelNameLengthLimit + } + if c.LabelValueLengthLimit == 0 { + c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit + } + if c.KeepDroppedTargets == 0 { + c.KeepDroppedTargets = globalConfig.KeepDroppedTargets + } + return nil } @@ -864,6 +917,7 @@ type RemoteWriteConfig struct { QueueConfig QueueConfig `yaml:"queue_config,omitempty"` MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"` SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` + AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -900,8 +954,12 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil - if httpClientConfigAuthEnabled && c.SigV4Config != nil { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") + } + + if c.SigV4Config != nil && c.AzureADConfig != nil { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") } return nil @@ -922,7 +980,7 @@ func validateHeadersForTracing(headers map[string]string) error { func validateHeaders(headers map[string]string) error { for header := range headers { if strings.ToLower(header) == "authorization" { - return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter") + return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter") } if _, ok := reservedHeaders[strings.ToLower(header)]; ok { return fmt.Errorf("%s is a reserved header. It must not be changed", header) diff --git a/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go b/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go index c59bd1f5d..99ea396b9 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go +++ b/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go @@ -111,6 +111,7 @@ func init() { // SDConfig is the configuration for Consul service discovery. 
type SDConfig struct { Server string `yaml:"server,omitempty"` + PathPrefix string `yaml:"path_prefix,omitempty"` Token config.Secret `yaml:"token,omitempty"` Datacenter string `yaml:"datacenter,omitempty"` Namespace string `yaml:"namespace,omitempty"` @@ -211,6 +212,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { clientConf := &consul.Config{ Address: conf.Server, + PathPrefix: conf.PathPrefix, Scheme: conf.Scheme, Datacenter: conf.Datacenter, Namespace: conf.Namespace, diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go index c45595c6d..60b63350f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go +++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go @@ -226,8 +226,8 @@ func (d *Discovery) watchFiles() { panic("no watcher configured") } for _, p := range d.paths { - if idx := strings.LastIndex(p, "/"); idx > -1 { - p = p[:idx] + if dir, _ := filepath.Split(p); dir != "" { + p = dir } else { p = "./" } diff --git a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go index 50afdc1ec..6d0599dfa 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go +++ b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go @@ -22,7 +22,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/hetznercloud/hcloud-go/hcloud" + "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -91,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er targets := make([]model.LabelSet, len(servers)) for i, server := range servers { labels := model.LabelSet{ - hetznerLabelRole: model.LabelValue(hetznerRoleHcloud), + hetznerLabelRole: model.LabelValue(HetznerRoleHcloud), hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)), hetznerLabelServerName: model.LabelValue(server.Name), hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name), diff --git a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hetzner.go b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hetzner.go index 084319d95..c3f7ec39c 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hetzner.go +++ b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hetzner.go @@ -20,7 +20,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/hetznercloud/hcloud-go/hcloud" + "github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -57,7 +57,7 @@ type SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval"` Port int `yaml:"port"` - Role role `yaml:"role"` + Role Role `yaml:"role"` hcloudEndpoint string // For tests only. robotEndpoint string // For tests only. } @@ -74,26 +74,26 @@ type refresher interface { refresh(context.Context) ([]*targetgroup.Group, error) } -// role is the role of the target within the Hetzner Ecosystem. -type role string +// Role is the Role of the target within the Hetzner Ecosystem. +type Role string // The valid options for role. 
const ( // Hetzner Robot Role (Dedicated Server) // https://robot.hetzner.com - hetznerRoleRobot role = "robot" + HetznerRoleRobot Role = "robot" // Hetzner Cloud Role // https://console.hetzner.cloud - hetznerRoleHcloud role = "hcloud" + HetznerRoleHcloud Role = "hcloud" ) // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *role) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { if err := unmarshal((*string)(c)); err != nil { return err } switch *c { - case hetznerRoleRobot, hetznerRoleHcloud: + case HetznerRoleRobot, HetznerRoleHcloud: return nil default: return fmt.Errorf("unknown role %q", *c) @@ -143,12 +143,12 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { switch conf.Role { - case hetznerRoleHcloud: + case HetznerRoleHcloud: if conf.hcloudEndpoint == "" { conf.hcloudEndpoint = hcloud.Endpoint } return newHcloudDiscovery(conf, l) - case hetznerRoleRobot: + case HetznerRoleRobot: if conf.robotEndpoint == "" { conf.robotEndpoint = "https://robot-ws.your-server.de" } diff --git a/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go b/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go index 496088028..1d8aa9302 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go +++ b/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go @@ -105,7 +105,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) targets := make([]model.LabelSet, len(servers)) for i, server := range servers { labels := model.LabelSet{ - hetznerLabelRole: model.LabelValue(hetznerRoleRobot), + hetznerLabelRole: model.LabelValue(HetznerRoleRobot), hetznerLabelServerID: model.LabelValue(strconv.Itoa(server.Server.ServerNumber)), hetznerLabelServerName: model.LabelValue(server.Server.ServerName), hetznerLabelDatacenter: model.LabelValue(strings.ToLower(server.Server.Dc)), diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go index 27742ab46..7200d52dd 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go @@ -29,7 +29,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) var ( @@ -248,9 +247,6 @@ func endpointsSourceFromNamespaceAndName(namespace, name string) string { } const ( - endpointsLabelPrefix = metaLabelPrefix + "endpoints_label_" - endpointsLabelPresentPrefix = metaLabelPrefix + "endpoints_labelpresent_" - endpointsNameLabel = metaLabelPrefix + "endpoints_name" endpointNodeName = metaLabelPrefix + "endpoint_node_name" endpointHostname = metaLabelPrefix + "endpoint_hostname" endpointReadyLabel = metaLabelPrefix + "endpoint_ready" @@ -265,16 +261,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { Source: endpointsSource(eps), } tg.Labels = model.LabelSet{ - namespaceLabel: lv(eps.Namespace), - endpointsNameLabel: lv(eps.Name), + namespaceLabel: lv(eps.Namespace), } e.addServiceLabels(eps.Namespace, eps.Name, tg) // Add endpoints labels metadata. 
- for k, v := range eps.Labels { - ln := strutil.SanitizeLabelName(k) - tg.Labels[model.LabelName(endpointsLabelPrefix+ln)] = lv(v) - tg.Labels[model.LabelName(endpointsLabelPresentPrefix+ln)] = presentValue - } + addObjectMetaLabels(tg.Labels, eps.ObjectMeta, RoleEndpoint) type podEntry struct { pod *apiv1.Pod @@ -305,7 +296,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { } if e.withNodeMetadata { - target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName) + if addr.NodeName != nil { + target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName) + } else if addr.TargetRef != nil && addr.TargetRef.Kind == "Node" { + target = addNodeLabels(target, e.nodeInf, e.logger, &addr.TargetRef.Name) + } } pod := e.resolvePodRef(addr.TargetRef) @@ -385,18 +380,21 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), + // PodIP can be empty when a pod is starting or has been evicted. + if len(pe.pod.Status.PodIP) != 0 { + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } @@ -458,13 +456,7 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L node := obj.(*apiv1.Node) // Allocate one target label for the node name, - // and two target labels for each node label. 
- nodeLabelset := make(model.LabelSet, 1+2*len(node.GetLabels())) - nodeLabelset[nodeNameLabel] = lv(*nodeName) - for k, v := range node.GetLabels() { - ln := strutil.SanitizeLabelName(k) - nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v) - nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue - } + nodeLabelset := make(model.LabelSet) + addObjectMetaLabels(nodeLabelset, node.ObjectMeta, RoleNode) return tg.Merge(nodeLabelset) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go index 841b7d4f6..e241c758b 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go @@ -252,7 +252,6 @@ func endpointSliceSourceFromNamespaceAndName(namespace, name string) string { } const ( - endpointSliceNameLabel = metaLabelPrefix + "endpointslice_name" endpointSliceAddressTypeLabel = metaLabelPrefix + "endpointslice_address_type" endpointSlicePortNameLabel = metaLabelPrefix + "endpointslice_port_name" endpointSlicePortProtocolLabel = metaLabelPrefix + "endpointslice_port_protocol" @@ -274,9 +273,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } tg.Labels = model.LabelSet{ namespaceLabel: lv(eps.namespace()), - endpointSliceNameLabel: lv(eps.name()), endpointSliceAddressTypeLabel: lv(eps.addressType()), } + + addObjectMetaLabels(tg.Labels, eps.getObjectMeta(), RoleEndpointSlice) + e.addServiceLabels(eps, tg) type podEntry struct { @@ -339,7 +340,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } if e.withNodeMetadata { - target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename()) + if ep.targetRef() != nil && ep.targetRef().Kind == "Node" { + target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name) + } else { + target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename()) + } } pod := e.resolvePodRef(ep.targetRef()) @@ -412,18 +417,21 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), + // PodIP can be empty when a pod is starting or has been evicted. 
+ if len(pe.pod.Status.PodIP) != 0 { + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go index 5a21f1b89..46fa708c1 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go @@ -17,11 +17,13 @@ import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" "k8s.io/api/discovery/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // endpointSliceAdaptor is an adaptor for the different EndpointSlice versions type endpointSliceAdaptor interface { get() interface{} + getObjectMeta() metav1.ObjectMeta name() string namespace() string addressType() string @@ -66,6 +68,10 @@ func (e *endpointSliceAdaptorV1) get() interface{} { return e.endpointSlice } +func (e *endpointSliceAdaptorV1) getObjectMeta() metav1.ObjectMeta { + return e.endpointSlice.ObjectMeta +} + func (e *endpointSliceAdaptorV1) name() string { return e.endpointSlice.ObjectMeta.Name } @@ -115,6 +121,10 @@ func (e *endpointSliceAdaptorV1Beta1) get() interface{} { return e.endpointSlice } +func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { + return e.endpointSlice.ObjectMeta +} + func (e *endpointSliceAdaptorV1Beta1) name() string { return e.endpointSlice.Name } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go index ad47c341a..697b6f519 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) var ( @@ -143,37 +142,22 @@ func ingressSourceFromNamespaceAndName(namespace, name string) string { } const ( - ingressNameLabel = metaLabelPrefix + "ingress_name" - ingressLabelPrefix = metaLabelPrefix + "ingress_label_" - ingressLabelPresentPrefix = metaLabelPrefix + "ingress_labelpresent_" - ingressAnnotationPrefix = metaLabelPrefix + "ingress_annotation_" - ingressAnnotationPresentPrefix = metaLabelPrefix + "ingress_annotationpresent_" - ingressSchemeLabel = metaLabelPrefix + "ingress_scheme" - ingressHostLabel = metaLabelPrefix + "ingress_host" - ingressPathLabel = metaLabelPrefix + "ingress_path" - ingressClassNameLabel = metaLabelPrefix + "ingress_class_name" + ingressSchemeLabel = metaLabelPrefix + "ingress_scheme" + ingressHostLabel = metaLabelPrefix + "ingress_host" + ingressPathLabel = metaLabelPrefix + "ingress_path" + ingressClassNameLabel = metaLabelPrefix + "ingress_class_name" ) func ingressLabels(ingress ingressAdaptor) model.LabelSet { // Each label and annotation will create 
two key-value pairs in the map. - ls := make(model.LabelSet, 2*(len(ingress.labels())+len(ingress.annotations()))+2) - ls[ingressNameLabel] = lv(ingress.name()) + ls := make(model.LabelSet) ls[namespaceLabel] = lv(ingress.namespace()) if cls := ingress.ingressClassName(); cls != nil { ls[ingressClassNameLabel] = lv(*cls) } - for k, v := range ingress.labels() { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(ingressLabelPrefix+ln)] = lv(v) - ls[model.LabelName(ingressLabelPresentPrefix+ln)] = presentValue - } + addObjectMetaLabels(ls, ingress.getObjectMeta(), RoleIngress) - for k, v := range ingress.annotations() { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(ingressAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(ingressAnnotationPresentPrefix+ln)] = presentValue - } return ls } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go index 113a067ca..7be8538b5 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go @@ -16,10 +16,12 @@ package kubernetes import ( v1 "k8s.io/api/networking/v1" "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ingressAdaptor is an adaptor for the different Ingress versions type ingressAdaptor interface { + getObjectMeta() metav1.ObjectMeta name() string namespace() string labels() map[string]string @@ -43,11 +45,12 @@ func newIngressAdaptorFromV1(ingress *v1.Ingress) ingressAdaptor { return &ingressAdaptorV1{ingress: ingress} } -func (i *ingressAdaptorV1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } +func (i *ingressAdaptorV1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } +func (i *ingressAdaptorV1) name() string { return i.ingress.Name } +func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace } +func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels } +func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations } +func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } func (i *ingressAdaptorV1) tlsHosts() []string { var hosts []string @@ -95,12 +98,12 @@ type ingressAdaptorV1Beta1 struct { func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor { return &ingressAdaptorV1Beta1{ingress: ingress} } - -func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } +func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } +func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } +func (i *ingressAdaptorV1Beta1) namespace() string { return 
i.ingress.Namespace } +func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } +func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } +func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } func (i *ingressAdaptorV1Beta1) tlsHosts() []string { var hosts []string diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go index a44bd513c..ca5ee49e2 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go @@ -23,6 +23,8 @@ import ( "sync" "time" + "github.com/prometheus/prometheus/util/strutil" + disv1beta1 "k8s.io/api/discovery/v1beta1" "github.com/go-kit/log" @@ -761,15 +763,21 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share indexers[nodeIndex] = func(obj interface{}) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { - return nil, fmt.Errorf("object is not a pod") + return nil, fmt.Errorf("object is not endpoints") } var nodes []string for _, target := range e.Subsets { for _, addr := range target.Addresses { - if addr.NodeName == nil { - continue + if addr.TargetRef != nil { + switch addr.TargetRef.Kind { + case "Pod": + if addr.NodeName != nil { + nodes = append(nodes, *addr.NodeName) + } + case "Node": + nodes = append(nodes, addr.TargetRef.Name) + } } - nodes = append(nodes, *addr.NodeName) } } return nodes, nil @@ -789,17 +797,29 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object switch e := obj.(type) { case *disv1.EndpointSlice: for _, target := range e.Endpoints { - if target.NodeName == nil { - continue + if target.TargetRef != nil { + switch target.TargetRef.Kind { + case "Pod": + if target.NodeName != nil { + nodes = append(nodes, *target.NodeName) + } + case "Node": + nodes = append(nodes, target.TargetRef.Name) + } } - nodes = append(nodes, *target.NodeName) } case *disv1beta1.EndpointSlice: for _, target := range e.Endpoints { - if target.NodeName == nil { - continue + if target.TargetRef != nil { + switch target.TargetRef.Kind { + case "Pod": + if target.NodeName != nil { + nodes = append(nodes, *target.NodeName) + } + case "Node": + nodes = append(nodes, target.TargetRef.Name) + } } - nodes = append(nodes, *target.NodeName) } default: return nil, fmt.Errorf("object is not an endpointslice") @@ -825,3 +845,19 @@ func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { // https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25 return semVer.Major() >= 1 && semVer.Minor() >= 21, nil } + +func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) { + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name) + + for k, v := range objectMeta.Labels { + ln := strutil.SanitizeLabelName(k) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_label_"+ln)] = lv(v) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_labelpresent_"+ln)] = presentValue + } + + for k, v := range objectMeta.Annotations { + ln := strutil.SanitizeLabelName(k) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotation_"+ln)] = lv(v) + labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotationpresent_"+ln)] = presentValue + } +} diff --git 
a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go index 16a06e7a0..6a20e7b1f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go @@ -152,33 +152,18 @@ func nodeSourceFromName(name string) string { } const ( - nodeNameLabel = metaLabelPrefix + "node_name" - nodeProviderIDLabel = metaLabelPrefix + "node_provider_id" - nodeLabelPrefix = metaLabelPrefix + "node_label_" - nodeLabelPresentPrefix = metaLabelPrefix + "node_labelpresent_" - nodeAnnotationPrefix = metaLabelPrefix + "node_annotation_" - nodeAnnotationPresentPrefix = metaLabelPrefix + "node_annotationpresent_" - nodeAddressPrefix = metaLabelPrefix + "node_address_" + nodeProviderIDLabel = metaLabelPrefix + "node_provider_id" + nodeAddressPrefix = metaLabelPrefix + "node_address_" ) func nodeLabels(n *apiv1.Node) model.LabelSet { // Each label and annotation will create two key-value pairs in the map. - ls := make(model.LabelSet, 2*(len(n.Labels)+len(n.Annotations))+1) + ls := make(model.LabelSet) - ls[nodeNameLabel] = lv(n.Name) ls[nodeProviderIDLabel] = lv(n.Spec.ProviderID) - for k, v := range n.Labels { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(nodeLabelPrefix+ln)] = lv(v) - ls[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue - } + addObjectMetaLabels(ls, n.ObjectMeta, RoleNode) - for k, v := range n.Annotations { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(nodeAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(nodeAnnotationPresentPrefix+ln)] = presentValue - } return ls } @@ -209,7 +194,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { return tg } -// nodeAddresses returns the provided node's address, based on the priority: +// nodeAddress returns the provided node's address, based on the priority: // 1. NodeInternalIP // 2. NodeInternalDNS // 3. 
NodeExternalIP diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go index 732cf52ad..74f74c1f7 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go @@ -30,7 +30,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) const nodeIndex = "node" @@ -180,7 +179,6 @@ func convertToPod(o interface{}) (*apiv1.Pod, error) { } const ( - podNameLabel = metaLabelPrefix + "pod_name" podIPLabel = metaLabelPrefix + "pod_ip" podContainerNameLabel = metaLabelPrefix + "pod_container_name" podContainerIDLabel = metaLabelPrefix + "pod_container_id" @@ -191,10 +189,6 @@ const ( podContainerIsInit = metaLabelPrefix + "pod_container_init" podReadyLabel = metaLabelPrefix + "pod_ready" podPhaseLabel = metaLabelPrefix + "pod_phase" - podLabelPrefix = metaLabelPrefix + "pod_label_" - podLabelPresentPrefix = metaLabelPrefix + "pod_labelpresent_" - podAnnotationPrefix = metaLabelPrefix + "pod_annotation_" - podAnnotationPresentPrefix = metaLabelPrefix + "pod_annotationpresent_" podNodeNameLabel = metaLabelPrefix + "pod_node_name" podHostIPLabel = metaLabelPrefix + "pod_host_ip" podUID = metaLabelPrefix + "pod_uid" @@ -215,7 +209,6 @@ func GetControllerOf(controllee metav1.Object) *metav1.OwnerReference { func podLabels(pod *apiv1.Pod) model.LabelSet { ls := model.LabelSet{ - podNameLabel: lv(pod.ObjectMeta.Name), podIPLabel: lv(pod.Status.PodIP), podReadyLabel: podReady(pod), podPhaseLabel: lv(string(pod.Status.Phase)), @@ -224,6 +217,8 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { podUID: lv(string(pod.ObjectMeta.UID)), } + addObjectMetaLabels(ls, pod.ObjectMeta, RolePod) + createdBy := GetControllerOf(pod) if createdBy != nil { if createdBy.Kind != "" { @@ -234,18 +229,6 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { } } - for k, v := range pod.Labels { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(podLabelPrefix+ln)] = lv(v) - ls[model.LabelName(podLabelPresentPrefix+ln)] = presentValue - } - - for k, v := range pod.Annotations { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(podAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(podAnnotationPresentPrefix+ln)] = presentValue - } - return ls } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go index 40e17679e..7addf0054 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/strutil" ) var ( @@ -147,38 +146,20 @@ func serviceSourceFromNamespaceAndName(namespace, name string) string { } const ( - serviceNameLabel = metaLabelPrefix + "service_name" - serviceLabelPrefix = metaLabelPrefix + "service_label_" - serviceLabelPresentPrefix = metaLabelPrefix + "service_labelpresent_" - serviceAnnotationPrefix = metaLabelPrefix + "service_annotation_" - serviceAnnotationPresentPrefix = metaLabelPrefix + "service_annotationpresent_" - servicePortNameLabel = metaLabelPrefix + "service_port_name" - servicePortNumberLabel = metaLabelPrefix + "service_port_number" - 
servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol" - serviceClusterIPLabel = metaLabelPrefix + "service_cluster_ip" - serviceLoadBalancerIP = metaLabelPrefix + "service_loadbalancer_ip" - serviceExternalNameLabel = metaLabelPrefix + "service_external_name" - serviceType = metaLabelPrefix + "service_type" + servicePortNameLabel = metaLabelPrefix + "service_port_name" + servicePortNumberLabel = metaLabelPrefix + "service_port_number" + servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol" + serviceClusterIPLabel = metaLabelPrefix + "service_cluster_ip" + serviceLoadBalancerIP = metaLabelPrefix + "service_loadbalancer_ip" + serviceExternalNameLabel = metaLabelPrefix + "service_external_name" + serviceType = metaLabelPrefix + "service_type" ) func serviceLabels(svc *apiv1.Service) model.LabelSet { - // Each label and annotation will create two key-value pairs in the map. - ls := make(model.LabelSet, 2*(len(svc.Labels)+len(svc.Annotations))+2) - - ls[serviceNameLabel] = lv(svc.Name) + ls := make(model.LabelSet) ls[namespaceLabel] = lv(svc.Namespace) + addObjectMetaLabels(ls, svc.ObjectMeta, RoleService) - for k, v := range svc.Labels { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(serviceLabelPrefix+ln)] = lv(v) - ls[model.LabelName(serviceLabelPresentPrefix+ln)] = presentValue - } - - for k, v := range svc.Annotations { - ln := strutil.SanitizeLabelName(k) - ls[model.LabelName(serviceAnnotationPrefix+ln)] = lv(v) - ls[model.LabelName(serviceAnnotationPresentPrefix+ln)] = presentValue - } return ls } diff --git a/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go b/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go index 12b957514..63213c87b 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go +++ b/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go @@ -304,10 +304,10 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro linodeLabelGroup: model.LabelValue(instance.Group), linodeLabelHypervisor: model.LabelValue(instance.Hypervisor), linodeLabelBackups: model.LabelValue(backupsStatus), - linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Disk<<20)), - linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Memory<<20)), + linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)), + linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)), linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)), - linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Transfer<<20)), + linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)), } addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10)) diff --git a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go index cfd3e2c08..3baf79aff 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go +++ b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go @@ -106,14 +106,16 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if len(c.AuthToken) > 0 && len(c.AuthTokenFile) > 0 { return errors.New("marathon_sd: at most one of auth_token & auth_token_file must be configured") } - if c.HTTPClientConfig.BasicAuth != nil 
&& (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") - } - if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") - } - if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { - return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") + + if len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 { + switch { + case c.HTTPClientConfig.BasicAuth != nil: + return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") + case len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0: + return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") + case c.HTTPClientConfig.Authorization != nil: + return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") + } } return c.HTTPClientConfig.Validate() } diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go index 2f7e99a07..b2fe1e787 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go +++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go @@ -36,6 +36,7 @@ const ( openstackLabelAddressPool = openstackLabelPrefix + "address_pool" openstackLabelInstanceFlavor = openstackLabelPrefix + "instance_flavor" openstackLabelInstanceID = openstackLabelPrefix + "instance_id" + openstackLabelInstanceImage = openstackLabelPrefix + "instance_image" openstackLabelInstanceName = openstackLabelPrefix + "instance_name" openstackLabelInstanceStatus = openstackLabelPrefix + "instance_status" openstackLabelPrivateIP = openstackLabelPrefix + "private_ip" @@ -144,12 +145,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, openstackLabelUserID: model.LabelValue(s.UserID), } - id, ok := s.Flavor["id"].(string) + flavorId, ok := s.Flavor["id"].(string) if !ok { level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string") continue } - labels[openstackLabelInstanceFlavor] = model.LabelValue(id) + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorId) + + imageId, ok := s.Image["id"].(string) + if ok { + labels[openstackLabelInstanceImage] = model.LabelValue(imageId) + } + for k, v := range s.Metadata { name := strutil.SanitizeLabelName(k) labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v) diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index f95f0051c..41873278c 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -15,6 +15,7 @@ package histogram import ( "fmt" + "math" "strings" ) @@ -93,26 +94,8 @@ func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { Sum: h.Sum, } - // TODO(beorn7): This is a straight-forward implementation using merging - // iterators for the 
original buckets and then adding one merged bucket - // after another to the newly created FloatHistogram. It's well possible - // that a more involved implementation performs much better, which we - // could do if this code path turns out to be performance-critical. - var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(true, 0, targetSchema); it.Next(); { - b := it.At() - c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(false, 0, targetSchema); it.Next(); { - b := it.At() - c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } + c.PositiveSpans, c.PositiveBuckets = mergeToSchema(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema) + c.NegativeSpans, c.NegativeBuckets = mergeToSchema(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema) return &c } @@ -148,6 +131,55 @@ func (h *FloatHistogram) String() string { return sb.String() } +// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing +// framework as well as in promtool rules unit tests. +// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series +func (h *FloatHistogram) TestExpression() string { + var res []string + m := h.Copy() + + m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1. + + if m.Schema != 0 { + res = append(res, fmt.Sprintf("schema:%d", m.Schema)) + } + if m.Count != 0 { + res = append(res, fmt.Sprintf("count:%g", m.Count)) + } + if m.Sum != 0 { + res = append(res, fmt.Sprintf("sum:%g", m.Sum)) + } + if m.ZeroCount != 0 { + res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount)) + } + if m.ZeroThreshold != 0 { + res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) + } + + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { + if len(spans) > 1 { + panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) + } + for _, span := range spans { + if span.Offset != 0 { + res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset)) + } + } + + var bucketStr []string + for _, bucket := range buckets { + bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket)) + } + if len(bucketStr) > 0 { + res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " "))) + } + return res + } + res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans) + res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans) + return "{{" + strings.Join(res, " ") + "}}" +} + // ZeroBucket returns the zero bucket. func (h *FloatHistogram) ZeroBucket() Bucket[float64] { return Bucket[float64]{ @@ -159,12 +191,12 @@ func (h *FloatHistogram) ZeroBucket() Bucket[float64] { } } -// Scale scales the FloatHistogram by the provided factor, i.e. it scales all +// Mul multiplies the FloatHistogram by the provided factor, i.e. it scales all // bucket counts including the zero bucket and the count and the sum of // observations. The bucket layout stays the same. This method changes the // receiving histogram directly (rather than acting on a copy). It returns a // pointer to the receiving histogram for convenience. 
-func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { +func (h *FloatHistogram) Mul(factor float64) *FloatHistogram { h.ZeroCount *= factor h.Count *= factor h.Sum *= factor @@ -177,6 +209,21 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { return h } +// Div works like Mul but divides instead of multiplies. +// When dividing by 0, everything will be set to Inf. +func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { + h.ZeroCount /= scalar + h.Count /= scalar + h.Sum /= scalar + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] /= scalar + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] /= scalar + } + return h +} + // Add adds the provided other histogram to the receiving histogram. Count, Sum, // and buckets from the other histogram are added to the corresponding // components of the receiving histogram. Buckets in the other histogram that do @@ -221,23 +268,17 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.Count += other.Count h.Sum += other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. - var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index + otherPositiveSpans := other.PositiveSpans + otherPositiveBuckets := other.PositiveBuckets + otherNegativeSpans := other.NegativeSpans + otherNegativeBuckets := other.NegativeBuckets + if other.Schema != h.Schema { + otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) + otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -248,25 +289,17 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Count -= other.Count h.Sum -= other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. 
- var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index + otherPositiveSpans := other.PositiveSpans + otherPositiveBuckets := other.PositiveBuckets + otherNegativeSpans := other.NegativeSpans + otherNegativeBuckets := other.NegativeBuckets + if other.Schema != h.Schema { + otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) + otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -301,103 +334,6 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { return true } -// addBucket takes the "coordinates" of the last bucket that was handled and -// adds the provided bucket after it. If a corresponding bucket exists, the -// count is added. If not, the bucket is inserted. The updated slices and the -// coordinates of the inserted or added-to bucket are returned. -func addBucket( - b Bucket[float64], - spans []Span, buckets []float64, - iSpan, iBucket int, - iInSpan, index int32, -) ( - newSpans []Span, newBuckets []float64, - newISpan, newIBucket int, newIInSpan int32, -) { - if iSpan == -1 { - // First add, check if it is before all spans. - if len(spans) == 0 || spans[0].Offset > b.Index { - // Add bucket before all others. - buckets = append(buckets, 0) - copy(buckets[1:], buckets) - buckets[0] = b.Count - if len(spans) > 0 && spans[0].Offset == b.Index+1 { - spans[0].Length++ - spans[0].Offset-- - return spans, buckets, 0, 0, 0 - } - spans = append(spans, Span{}) - copy(spans[1:], spans) - spans[0] = Span{Offset: b.Index, Length: 1} - if len(spans) > 1 { - // Convert the absolute offset in the formerly - // first span to a relative offset. - spans[1].Offset -= b.Index + 1 - } - return spans, buckets, 0, 0, 0 - } - if spans[0].Offset == b.Index { - // Just add to first bucket. - buckets[0] += b.Count - return spans, buckets, 0, 0, 0 - } - // We are behind the first bucket, so set everything to the - // first bucket and continue normally. - iSpan, iBucket, iInSpan = 0, 0, 0 - index = spans[0].Offset - } - deltaIndex := b.Index - index - for { - remainingInSpan := int32(spans[iSpan].Length) - iInSpan - if deltaIndex < remainingInSpan { - // Bucket is in current span. - iBucket += int(deltaIndex) - iInSpan += deltaIndex - buckets[iBucket] += b.Count - return spans, buckets, iSpan, iBucket, iInSpan - } - deltaIndex -= remainingInSpan - iBucket += int(remainingInSpan) - iSpan++ - if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset { - // Bucket is in gap behind previous span (or there are no further spans). 
- buckets = append(buckets, 0) - copy(buckets[iBucket+1:], buckets[iBucket:]) - buckets[iBucket] = b.Count - if deltaIndex == 0 { - // Directly after previous span, extend previous span. - if iSpan < len(spans) { - spans[iSpan].Offset-- - } - iSpan-- - iInSpan = int32(spans[iSpan].Length) - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 { - // Directly before next span, extend next span. - iInSpan = 0 - spans[iSpan].Offset-- - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - // No next span, or next span is not directly adjacent to new bucket. - // Add new span. - iInSpan = 0 - if iSpan < len(spans) { - spans[iSpan].Offset -= deltaIndex + 1 - } - spans = append(spans, Span{}) - copy(spans[iSpan+1:], spans[iSpan:]) - spans[iSpan] = Span{Length: 1, Offset: deltaIndex} - return spans, buckets, iSpan, iBucket, iInSpan - } - // Try start of next span. - deltaIndex -= spans[iSpan].Offset - iInSpan = 0 - } -} - // Compact eliminates empty buckets at the beginning and end of each span, then // merges spans that are consecutive or at most maxEmptyBuckets apart, and // finally splits spans that contain more consecutive empty buckets than @@ -406,7 +342,7 @@ func addBucket( // receiving histogram, but a pointer to it is returned for convenience. // // The ideal value for maxEmptyBuckets depends on circumstances. The motivation -// to set maxEmptyBuckets > 0 is the assumption that is is less overhead to +// to set maxEmptyBuckets > 0 is the assumption that is less overhead to // represent very few empty buckets explicitly within one span than cutting the // one span into two to treat the empty buckets as a gap between the two spans, // both in terms of storage requirement as well as in terms of encoding and @@ -600,10 +536,24 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] // set to the zero threshold. func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ - h: h, - negIter: h.NegativeReverseBucketIterator(), - posIter: h.PositiveBucketIterator(), - state: -1, + h: h, + leftIter: h.NegativeReverseBucketIterator(), + rightIter: h.PositiveBucketIterator(), + state: -1, + } +} + +// AllReverseBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in descending order (starting at the lowest bucket +// and going up). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. +func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { + return &allFloatBucketIterator{ + h: h, + leftIter: h.PositiveReverseBucketIterator(), + rightIter: h.NegativeBucketIterator(), + state: -1, } } @@ -789,6 +739,11 @@ type floatBucketIterator struct { absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value. } +func (i *floatBucketIterator) At() Bucket[float64] { + // Need to use i.targetSchema rather than i.baseBucketIterator.schema. 
+ return i.baseBucketIterator.at(i.targetSchema) +} + func (i *floatBucketIterator) Next() bool { if i.spansIdx >= len(i.spans) { return false @@ -888,8 +843,8 @@ func (i *reverseFloatBucketIterator) Next() bool { } type allFloatBucketIterator struct { - h *FloatHistogram - negIter, posIter BucketIterator[float64] + h *FloatHistogram + leftIter, rightIter BucketIterator[float64] // -1 means we are iterating negative buckets. // 0 means it is time for the zero bucket. // 1 means we are iterating positive buckets. @@ -901,10 +856,13 @@ type allFloatBucketIterator struct { func (i *allFloatBucketIterator) Next() bool { switch i.state { case -1: - if i.negIter.Next() { - i.currBucket = i.negIter.At() - if i.currBucket.Upper > -i.h.ZeroThreshold { + if i.leftIter.Next() { + i.currBucket = i.leftIter.At() + switch { + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: i.currBucket.Upper = -i.h.ZeroThreshold + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold } return true } @@ -925,10 +883,13 @@ func (i *allFloatBucketIterator) Next() bool { } return i.Next() case 1: - if i.posIter.Next() { - i.currBucket = i.posIter.At() - if i.currBucket.Lower < i.h.ZeroThreshold { + if i.rightIter.Next() { + i.currBucket = i.rightIter.At() + switch { + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: i.currBucket.Lower = i.h.ZeroThreshold + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold } return true } @@ -942,3 +903,202 @@ func (i *allFloatBucketIterator) Next() bool { func (i *allFloatBucketIterator) At() Bucket[float64] { return i.currBucket } + +// targetIdx returns the bucket index in the target schema for the given bucket +// index idx in the original schema. +func targetIdx(idx, originSchema, targetSchema int32) int32 { + return ((idx - 1) >> (originSchema - targetSchema)) + 1 +} + +// mergeToSchema is used to merge a FloatHistogram's Spans and Buckets (no matter if +// positive or negative) from the original schema to the target schema. +// The target schema must be smaller than the original schema. +func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, targetSchema int32) ([]Span, []float64) { + var ( + targetSpans []Span // The spans in the target schema. + targetBuckets []float64 // The buckets in the target schema. + bucketIdx int32 // The index of bucket in the origin schema. + lastTargetBucketIdx int32 // The index of the last added target bucket. + origBucketIdx int // The position of a bucket in originBuckets slice. + ) + + for _, span := range originSpans { + // Determine the index of the first bucket in this span. + bucketIdx += span.Offset + for j := 0; j < int(span.Length); j++ { + // Determine the index of the bucket in the target schema from the index in the original schema. + targetBucketIdx := targetIdx(bucketIdx, originSchema, targetSchema) + + switch { + case len(targetSpans) == 0: + // This is the first span in the targetSpans. + span := Span{ + Offset: targetBucketIdx, + Length: 1, + } + targetSpans = append(targetSpans, span) + targetBuckets = append(targetBuckets, originBuckets[0]) + lastTargetBucketIdx = targetBucketIdx + + case lastTargetBucketIdx == targetBucketIdx: + // The current bucket has to be merged into the same target bucket as the previous bucket. 
+ targetBuckets[len(targetBuckets)-1] += originBuckets[origBucketIdx] + + case (lastTargetBucketIdx + 1) == targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is next to the previous target bucket, + // so we add it to the current target span. + targetSpans[len(targetSpans)-1].Length++ + targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) + lastTargetBucketIdx++ + + case (lastTargetBucketIdx + 1) < targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is separated by a gap from the previous target bucket, + // so we need to add a new target span. + span := Span{ + Offset: targetBucketIdx - lastTargetBucketIdx - 1, + Length: 1, + } + targetSpans = append(targetSpans, span) + targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) + lastTargetBucketIdx = targetBucketIdx + } + + bucketIdx++ + origBucketIdx++ + } + } + + return targetSpans, targetBuckets +} + +// addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA, +// creating missing buckets in spansA/bucketsA as needed. +// It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA, +// although spansA/bucketsA might get modified by this function). +// All buckets must use the same provided schema. +// Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored. +// If negative is true, the buckets in spansB/bucketsB are subtracted rather than added. +func addBuckets( + schema int32, threshold float64, negative bool, + spansA []Span, bucketsA []float64, + spansB []Span, bucketsB []float64, +) ([]Span, []float64) { + var ( + iSpan int = -1 + iBucket int = -1 + iInSpan int32 + indexA int32 + indexB int32 + bIdxB int + bucketB float64 + deltaIndex int32 + lowerThanThreshold = true + ) + + for _, spanB := range spansB { + indexB += spanB.Offset + for j := 0; j < int(spanB.Length); j++ { + if lowerThanThreshold && getBound(indexB, schema) <= threshold { + goto nextLoop + } + lowerThanThreshold = false + + bucketB = bucketsB[bIdxB] + if negative { + bucketB *= -1 + } + + if iSpan == -1 { + if len(spansA) == 0 || spansA[0].Offset > indexB { + // Add bucket before all others. + bucketsA = append(bucketsA, 0) + copy(bucketsA[1:], bucketsA) + bucketsA[0] = bucketB + if len(spansA) > 0 && spansA[0].Offset == indexB+1 { + spansA[0].Length++ + spansA[0].Offset-- + goto nextLoop + } else { + spansA = append(spansA, Span{}) + copy(spansA[1:], spansA) + spansA[0] = Span{Offset: indexB, Length: 1} + if len(spansA) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spansA[1].Offset -= indexB + 1 + } + goto nextLoop + } + } else if spansA[0].Offset == indexB { + // Just add to first bucket. + bucketsA[0] += bucketB + goto nextLoop + } + iSpan, iBucket, iInSpan = 0, 0, 0 + indexA = spansA[0].Offset + } + deltaIndex = indexB - indexA + for { + remainingInSpan := int32(spansA[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. + iBucket += int(deltaIndex) + iInSpan += deltaIndex + bucketsA[iBucket] += bucketB + break + } else { + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). 
+ bucketsA = append(bucketsA, 0) + copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) + bucketsA[iBucket] = bucketB + switch { + case deltaIndex == 0: + // Directly after previous span, extend previous span. + if iSpan < len(spansA) { + spansA[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spansA[iSpan].Length) + spansA[iSpan].Length++ + goto nextLoop + case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: + // Directly before next span, extend next span. + iInSpan = 0 + spansA[iSpan].Offset-- + spansA[iSpan].Length++ + goto nextLoop + default: + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spansA) { + spansA[iSpan].Offset -= deltaIndex + 1 + } + spansA = append(spansA, Span{}) + copy(spansA[iSpan+1:], spansA[iSpan:]) + spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} + goto nextLoop + } + } else { + // Try start of next span. + deltaIndex -= spansA[iSpan].Offset + iInSpan = 0 + } + } + } + + nextLoop: + indexA = indexB + indexB++ + bIdxB++ + } + } + + return spansA, bucketsA +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go index e1de5ffb5..dad54cb06 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go @@ -102,16 +102,22 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { } func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] { + return b.at(b.schema) +} + +// at is an internal version of the exported At to enable using a different +// schema. +func (b baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { bucket := Bucket[BC]{ Count: BC(b.currCount), Index: b.currIdx, } if b.positive { - bucket.Upper = getBound(b.currIdx, b.schema) - bucket.Lower = getBound(b.currIdx-1, b.schema) + bucket.Upper = getBound(b.currIdx, schema) + bucket.Lower = getBound(b.currIdx-1, schema) } else { - bucket.Lower = -getBound(b.currIdx, b.schema) - bucket.Upper = -getBound(b.currIdx-1, b.schema) + bucket.Lower = -getBound(b.currIdx, schema) + bucket.Upper = -getBound(b.currIdx-1, schema) } bucket.LowerInclusive = bucket.Lower < 0 bucket.UpperInclusive = bucket.Upper > 0 diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 9ac0e5b53..0c27e15c7 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -621,7 +621,7 @@ func (b *ScratchBuilder) Sort() { slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) } -// Asssign is for when you already have a Labels which you want this ScratchBuilder to return. +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. func (b *ScratchBuilder) Assign(ls Labels) { b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. 
} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go similarity index 89% rename from vendor/github.com/prometheus/prometheus/model/labels/labels_string.go rename to vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go index 6d54e98ab..a87545a26 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go @@ -49,12 +49,6 @@ type Labels struct { data string } -type labelSlice []Label - -func (ls labelSlice) Len() int { return len(ls) } -func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } -func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name } - func decodeSize(data string, index int) (int, int) { // Fast-path for common case of a single byte, value 0..127. b := data[index] @@ -273,26 +267,53 @@ func (ls Labels) Copy() Labels { // Get returns the value for the label with the given name. // Returns an empty string if the label doesn't exist. func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. + } for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.data, i) - lValue, i = decodeString(ls.data, i) - if lName == name { - return lValue + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + lValue, _ := decodeString(ls.data, i) + return lValue + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size } + size, i = decodeSize(ls.data, i) + i += size } return "" } // Has returns true if the label with the given name is present. func (ls Labels) Has(name string) bool { + if name == "" { // Avoid crash in loop if someone asks for "". + return false // Prometheus does not store blank label names. + } for i := 0; i < len(ls.data); { - var lName string - lName, i = decodeString(ls.data, i) - _, i = decodeString(ls.data, i) - if lName == name { - return true + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + return true + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size } + size, i = decodeSize(ls.data, i) + i += size } return false } @@ -422,37 +443,49 @@ func FromStrings(ss ...string) Labels { // Compare compares the two label sets. // The result will be 0 if a==b, <0 if a < b, and >0 if a > b. -// TODO: replace with Less function - Compare is never needed. -// TODO: just compare the underlying strings when we don't need alphanumeric sorting. func Compare(a, b Labels) int { - l := len(a.data) - if len(b.data) < l { - l = len(b.data) - } - - ia, ib := 0, 0 - for ia < l { - var aName, bName string - aName, ia = decodeString(a.data, ia) - bName, ib = decodeString(b.data, ib) - if aName != bName { - if aName < bName { - return -1 - } - return 1 - } - var aValue, bValue string - aValue, ia = decodeString(a.data, ia) - bValue, ib = decodeString(b.data, ib) - if aValue != bValue { - if aValue < bValue { - return -1 - } - return 1 + // Find the first byte in the string where a and b differ. 
+ shorter, longer := a.data, b.data + if len(b.data) < len(a.data) { + shorter, longer = b.data, a.data + } + i := 0 + // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. + sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) + lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + for ; i < len(shorter)-8; i += 8 { + if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { + break } } - // If all labels so far were in common, the set with fewer labels comes first. - return len(a.data) - len(b.data) + // Now go 1 byte at a time. + for ; i < len(shorter); i++ { + if shorter[i] != longer[i] { + break + } + } + if i == len(shorter) { + // One Labels was a prefix of the other; the set with fewer labels compares lower. + return len(a.data) - len(b.data) + } + + // Now we know that there is some difference before the end of a and b. + // Go back through the fields and find which field that difference is in. + firstCharDifferent := i + for i = 0; ; { + size, nextI := decodeSize(a.data, i) + if nextI+size > firstCharDifferent { + break + } + i = nextI + size + } + // Difference is inside this entry. + aStr, _ := decodeString(a.data, i) + bStr, _ := decodeString(b.data, i) + if aStr < bStr { + return -1 + } + return +1 } // Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. @@ -800,7 +833,7 @@ func (b *ScratchBuilder) Sort() { slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) } -// Asssign is for when you already have a Labels which you want this ScratchBuilder to return. +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. func (b *ScratchBuilder) Assign(l Labels) { b.output = l } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index e09a63772..14319c7f7 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -25,9 +25,16 @@ type FastRegexMatcher struct { prefix string suffix string contains string + + // shortcut for literals + literal bool + value string } func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { + if isLiteral(v) { + return &FastRegexMatcher{literal: true, value: v}, nil + } re, err := regexp.Compile("^(?:" + v + ")$") if err != nil { return nil, err @@ -50,6 +57,9 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { } func (m *FastRegexMatcher) MatchString(s string) bool { + if m.literal { + return s == m.value + } if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { return false } @@ -63,9 +73,16 @@ func (m *FastRegexMatcher) MatchString(s string) bool { } func (m *FastRegexMatcher) GetRegexString() string { + if m.literal { + return m.value + } return m.re.String() } +func isLiteral(re string) bool { + return regexp.QuoteMeta(re) == re +} + // optimizeConcatRegex returns literal prefix/suffix text that can be safely // checked against the label value before running the regexp matcher. 
func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) { diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 5027c3963..fadf35b86 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -202,10 +202,12 @@ func (re Regexp) String() string { return str[4 : len(str)-2] } -// Process returns a relabeled copy of the given label set. The relabel configurations +// Process returns a relabeled version of the given label set. The relabel configurations // are applied in order of input. +// There are circumstances where Process will modify the input label. +// If you want to avoid issues with the input label set being modified, at the cost of +// higher memory usage, you can use lbls.Copy(). // If a label set is dropped, EmptyLabels and false is returned. -// May return the input labelSet modified. func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) { lb := labels.NewBuilder(lbls) if !ProcessBuilder(lb, cfgs...) { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 9efd942e8..38903afc9 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -59,7 +59,9 @@ type Parser interface { Metric(l *labels.Labels) string // Exemplar writes the exemplar of the current sample into the passed - // exemplar. It returns if an exemplar exists or not. + // exemplar. It can be called repeatedly to retrieve multiple exemplars + // for the same sample. It returns false once all exemplars are + // retrieved (including the case where no exemplars exist at all). Exemplar(l *exemplar.Exemplar) bool // Next advances the parser to the next sample. It returns false if no @@ -71,7 +73,7 @@ type Parser interface { // // This function always returns a valid parser, but might additionally // return an error if the content type cannot be parsed. -func New(b []byte, contentType string) (Parser, error) { +func New(b []byte, contentType string, parseClassicHistograms bool) (Parser, error) { if contentType == "" { return NewPromParser(b), nil } @@ -84,7 +86,7 @@ func New(b []byte, contentType string) (Parser, error) { case "application/openmetrics-text": return NewOpenMetricsParser(b), nil case "application/vnd.google.protobuf": - return NewProtobufParser(b), nil + return NewProtobufParser(b, parseClassicHistograms), nil default: return NewPromParser(b), nil } @@ -100,7 +102,7 @@ const ( EntrySeries Entry = 2 // A series with a simple float64 as value. EntryComment Entry = 3 EntryUnit Entry = 4 - EntryHistogram Entry = 5 // A series with a sparse histogram as a value. + EntryHistogram Entry = 5 // A series with a native histogram as a value. ) // MetricType represents metric type values. 
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index c17d40020..e0833636f 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -174,8 +174,10 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { return s } -// Exemplar writes the exemplar of the current sample into the passed -// exemplar. It returns the whether an exemplar exists. +// Exemplar writes the exemplar of the current sample into the passed exemplar. +// It returns whether an exemplar exists. As OpenMetrics only ever has one +// exemplar per sample, every call after the first (for the same sample) will +// always return false. func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { if len(p.exemplar) == 0 { return false @@ -204,6 +206,8 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { p.builder.Sort() e.Labels = p.builder.Labels() + // Wipe exemplar so that future calls return false. + p.exemplar = p.exemplar[:0] return true } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index eca145955..fbb84a2bd 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -52,8 +52,14 @@ type ProtobufParser struct { // fieldPos is the position within a Summary or (legacy) Histogram. -2 // is the count. -1 is the sum. Otherwise it is the index within // quantiles/buckets. - fieldPos int - fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. + fieldPos int + fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. + redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram. + + // exemplarReturned is set to true each time an exemplar has been + // returned, and set back to false upon each Next() call. + exemplarReturned bool + // state is marked by the entry we are processing. EntryInvalid implies // that we have to decode the next MetricFamily. state Entry @@ -62,17 +68,22 @@ type ProtobufParser struct { mf *dto.MetricFamily + // Wether to also parse a classic histogram that is also present as a + // native histogram. + parseClassicHistograms bool + // The following are just shenanigans to satisfy the Parser interface. metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric. } // NewProtobufParser returns a parser for the payload in the byte slice. -func NewProtobufParser(b []byte) Parser { +func NewProtobufParser(b []byte, parseClassicHistograms bool) Parser { return &ProtobufParser{ - in: b, - state: EntryInvalid, - mf: &dto.MetricFamily{}, - metricBytes: &bytes.Buffer{}, + in: b, + state: EntryInvalid, + mf: &dto.MetricFamily{}, + metricBytes: &bytes.Buffer{}, + parseClassicHistograms: parseClassicHistograms, } } @@ -98,7 +109,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { v = float64(s.GetSampleCount()) case -1: v = s.GetSampleSum() - // Need to detect a summaries without quantile here. + // Need to detect summaries without quantile here. 
if len(s.GetQuantile()) == 0 { p.fieldsDone = true } @@ -106,19 +117,28 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { v = s.GetQuantile()[p.fieldPos].GetValue() } case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - // This should only happen for a legacy histogram. + // This should only happen for a classic histogram. h := m.GetHistogram() switch p.fieldPos { case -2: - v = float64(h.GetSampleCount()) + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } case -1: v = h.GetSampleSum() default: bb := h.GetBucket() if p.fieldPos >= len(bb) { - v = float64(h.GetSampleCount()) + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } } else { - v = float64(bb[p.fieldPos].GetCumulativeCount()) + v = bb[p.fieldPos].GetCumulativeCountFloat() + if v == 0 { + v = float64(bb[p.fieldPos].GetCumulativeCount()) + } } } default: @@ -149,6 +169,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his ts = m.GetTimestampMs() h = m.GetHistogram() ) + if p.parseClassicHistograms && len(h.GetBucket()) > 0 { + p.redoClassic = true + } if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { // It is a float histogram. fh := histogram.FloatHistogram{ @@ -274,8 +297,12 @@ func (p *ProtobufParser) Metric(l *labels.Labels) string { // Exemplar writes the exemplar of the current sample into the passed // exemplar. It returns if an exemplar exists or not. In case of a native // histogram, the legacy bucket section is still used for exemplars. To ingest -// all examplars, call the Exemplar method repeatedly until it returns false. +// all exemplars, call the Exemplar method repeatedly until it returns false. func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.exemplarReturned && p.state == EntrySeries { + // We only ever return one exemplar per (non-native-histogram) series. + return false + } m := p.mf.GetMetric()[p.metricPos] var exProto *dto.Exemplar switch p.mf.GetType() { @@ -316,6 +343,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { } p.builder.Sort() ex.Labels = p.builder.Labels() + p.exemplarReturned = true return true } @@ -323,6 +351,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { // text format parser). It returns (EntryInvalid, io.EOF) if no samples were // read. func (p *ProtobufParser) Next() (Entry, error) { + p.exemplarReturned = false switch p.state { case EntryInvalid: p.metricPos = 0 @@ -376,6 +405,12 @@ func (p *ProtobufParser) Next() (Entry, error) { return EntryInvalid, err } case EntryHistogram, EntrySeries: + if p.redoClassic { + p.redoClassic = false + p.state = EntrySeries + p.fieldPos = -3 + p.fieldsDone = false + } t := p.mf.GetType() if p.state == EntrySeries && !p.fieldsDone && (t == dto.MetricType_SUMMARY || @@ -386,6 +421,14 @@ func (p *ProtobufParser) Next() (Entry, error) { p.metricPos++ p.fieldPos = -2 p.fieldsDone = false + // If this is a metric family containing native + // histograms, we have to switch back to native + // histograms after parsing a classic histogram. + if p.state == EntrySeries && + (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && + isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + p.state = EntryHistogram + } } if p.metricPos >= len(p.mf.GetMetric()) { p.state = EntryInvalid @@ -432,7 +475,7 @@ func (p *ProtobufParser) updateMetricBytes() error { // state. 
func (p *ProtobufParser) getMagicName() string { t := p.mf.GetType() - if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) { + if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) { return p.mf.GetName() } if p.fieldPos == -2 { @@ -521,18 +564,17 @@ func formatOpenMetricsFloat(f float64) string { return s + ".0" } -// isNativeHistogram returns false iff the provided histograms has no sparse -// buckets and a zero threshold of 0 and a zero count of 0. In principle, this -// could still be meant to be a native histogram (with a zero threshold of 0 and -// no observations yet), but for now, we'll treat this case as a conventional -// histogram. -// -// TODO(beorn7): In the final format, there should be an unambiguous way of -// deciding if a histogram should be ingested as a conventional one or a native -// one. +// isNativeHistogram returns false iff the provided histograms has no spans at +// all (neither positive nor negative) and a zero threshold of 0 and a zero +// count of 0. In principle, this could still be meant to be a native histogram +// with a zero threshold of 0 and no observations yet. In that case, +// instrumentation libraries should add a "no-op" span (e.g. length zero, offset +// zero) to signal that the histogram is meant to be parsed as a native +// histogram. Failing to do so will cause Prometheus to parse it as a classic +// histogram as long as no observations have happened. func isNativeHistogram(h *dto.Histogram) bool { - return len(h.GetNegativeDelta()) > 0 || - len(h.GetPositiveDelta()) > 0 || - h.GetZeroCount() > 0 || - h.GetZeroThreshold() > 0 + return len(h.GetPositiveSpan()) > 0 || + len(h.GetNegativeSpan()) > 0 || + h.GetZeroThreshold() > 0 || + h.GetZeroCount() > 0 } diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index c3b2e5c7e..891372c43 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -349,19 +349,6 @@ func (n *Manager) Send(alerts ...*Alert) { n.mtx.Lock() defer n.mtx.Unlock() - // Attach external labels before relabelling and sending. - for _, a := range alerts { - lb := labels.NewBuilder(a.Labels) - - n.opts.ExternalLabels.Range(func(l labels.Label) { - if a.Labels.Get(l.Name) == "" { - lb.Set(l.Name, l.Value) - } - }) - - a.Labels = lb.Labels() - } - alerts = n.relabelAlerts(alerts) if len(alerts) == 0 { return @@ -390,15 +377,25 @@ func (n *Manager) Send(alerts ...*Alert) { n.setMore() } +// Attach external labels and process relabelling rules. func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert { + lb := labels.NewBuilder(labels.EmptyLabels()) var relabeledAlerts []*Alert - for _, alert := range alerts { - labels, keep := relabel.Process(alert.Labels, n.opts.RelabelConfigs...) - if keep { - alert.Labels = labels - relabeledAlerts = append(relabeledAlerts, alert) + for _, a := range alerts { + lb.Reset(a.Labels) + n.opts.ExternalLabels.Range(func(l labels.Label) { + if a.Labels.Get(l.Name) == "" { + lb.Set(l.Name, l.Value) + } + }) + + keep := relabel.ProcessBuilder(lb, n.opts.RelabelConfigs...) 
+ if !keep { + continue } + a.Labels = lb.Labels() + relabeledAlerts = append(relabeledAlerts, a) } return relabeledAlerts } @@ -701,36 +698,38 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string { func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { var res []alertmanager var droppedAlertManagers []alertmanager + lb := labels.NewBuilder(labels.EmptyLabels()) for _, tlset := range tg.Targets { - lbls := make([]labels.Label, 0, len(tlset)+2+len(tg.Labels)) + lb.Reset(labels.EmptyLabels()) for ln, lv := range tlset { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } // Set configured scheme as the initial scheme label for overwrite. - lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: cfg.Scheme}) - lbls = append(lbls, labels.Label{Name: pathLabel, Value: postPath(cfg.PathPrefix, cfg.APIVersion)}) + lb.Set(model.SchemeLabel, cfg.Scheme) + lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion)) // Combine target labels with target group labels. for ln, lv := range tg.Labels { if _, ok := tlset[ln]; !ok { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } } - lset, keep := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...) + preRelabel := lb.Labels() + keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) if !keep { - droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{labels.New(lbls...)}) + droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel}) continue } - addr := lset.Get(model.AddressLabel) + addr := lb.Get(model.AddressLabel) if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return nil, nil, err } - res = append(res, alertmanagerLabels{lset}) + res = append(res, alertmanagerLabels{lb.Labels()}) } return res, droppedAlertManagers, nil } diff --git a/vendor/github.com/prometheus/prometheus/prompb/README.md b/vendor/github.com/prometheus/prometheus/prompb/README.md index 8c19b17e9..a33d7bfb8 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/README.md +++ b/vendor/github.com/prometheus/prometheus/prompb/README.md @@ -4,6 +4,6 @@ re-compile them when building Prometheus. If however you have modified the defs and do need to re-compile, run `make proto` from the parent dir. -In order for the script to run, you'll need `protoc` (version 3.12.3) in your -PATH. +In order for the [script](../scripts/genproto.sh) to run, you'll need `protoc` (version 3.15.8) in +your PATH. diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go index 3e4bc7df8..83a7da779 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go @@ -414,6 +414,9 @@ type Histogram struct { NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"` NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"` // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. 
Otherwise, it would be + // indistinguishable from a classic histogram. PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto index 6bbea622f..3fef2b6d0 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto @@ -97,6 +97,9 @@ message Histogram { repeated double negative_count = 11; // Absolute count of each bucket. // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. repeated BucketSpan positive_span = 12 [(gogoproto.nullable) = false]; // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index cbeeb82a1..816f20721 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -130,11 +130,35 @@ type Query interface { String() string } -type QueryOpts struct { +type PrometheusQueryOpts struct { // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. - EnablePerStepStats bool + enablePerStepStats bool // Lookback delta duration for this query. - LookbackDelta time.Duration + lookbackDelta time.Duration +} + +var _ QueryOpts = &PrometheusQueryOpts{} + +func NewPrometheusQueryOpts(enablePerStepStats bool, lookbackDelta time.Duration) QueryOpts { + return &PrometheusQueryOpts{ + enablePerStepStats: enablePerStepStats, + lookbackDelta: lookbackDelta, + } +} + +func (p *PrometheusQueryOpts) EnablePerStepStats() bool { + return p.enablePerStepStats +} + +func (p *PrometheusQueryOpts) LookbackDelta() time.Duration { + return p.lookbackDelta +} + +type QueryOpts interface { + // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. + EnablePerStepStats() bool + // Lookback delta duration for this query. + LookbackDelta() time.Duration } // query implements the Query interface. @@ -408,69 +432,74 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. 
-func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { - expr, err := parser.ParseExpr(qs) +func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) + finishQueue, err := ng.queueActive(ctx, qry) if err != nil { return nil, err } - qry, err := ng.newQuery(q, opts, expr, ts, ts, 0) + defer finishQueue() + expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } - qry.q = qs + if err := ng.validateOpts(expr); err != nil { + return nil, err + } + *pExpr = PreprocessExpr(expr, ts, ts) return qry, nil } // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. -func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) + finishQueue, err := ng.queueActive(ctx, qry) + if err != nil { + return nil, err + } + defer finishQueue() expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } + if err := ng.validateOpts(expr); err != nil { + return nil, err + } if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) } - qry, err := ng.newQuery(q, opts, expr, start, end, interval) - if err != nil { - return nil, err - } - qry.q = qs + *pExpr = PreprocessExpr(expr, start, end) return qry, nil } -func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Expr, start, end time.Time, interval time.Duration) (*query, error) { - if err := ng.validateOpts(expr); err != nil { - return nil, err - } - - // Default to empty QueryOpts if not provided. +func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { if opts == nil { - opts = &QueryOpts{} + opts = NewPrometheusQueryOpts(false, 0) } - lookbackDelta := opts.LookbackDelta + lookbackDelta := opts.LookbackDelta() if lookbackDelta <= 0 { lookbackDelta = ng.lookbackDelta } es := &parser.EvalStmt{ - Expr: PreprocessExpr(expr, start, end), Start: start, End: end, Interval: interval, LookbackDelta: lookbackDelta, } qry := &query{ + q: qs, stmt: es, ng: ng, stats: stats.NewQueryTimers(), - sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats), + sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats()), queryable: q, } - return qry, nil + return &es.Expr, qry } var ( @@ -589,18 +618,11 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime) defer execSpanTimer.Finish() - queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) - // Log query in active log. The active log guarantees that we don't run over - // MaxConcurrent queries. 
- if ng.activeQueryTracker != nil { - queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) - if err != nil { - queueSpanTimer.Finish() - return nil, nil, contextErr(err, "query queue") - } - defer ng.activeQueryTracker.Delete(queryIndex) + finishQueue, err := ng.queueActive(ctx, q) + if err != nil { + return nil, nil, err } - queueSpanTimer.Finish() + defer finishQueue() // Cancel when execution is done or an error was raised. defer q.cancel() @@ -623,6 +645,18 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement())) } +// Log query in active log. The active log guarantees that we don't run over +// MaxConcurrent queries. +func (ng *Engine) queueActive(ctx context.Context, q *query) (func(), error) { + if ng.activeQueryTracker == nil { + return func() {}, nil + } + queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) + queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) + queueSpanTimer.Finish() + return func() { ng.activeQueryTracker.Delete(queryIndex) }, err +} + func timeMilliseconds(t time.Time) int64 { return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) } @@ -1109,7 +1143,11 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } } enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)} - seriess := make(map[uint64]Series, biggestLen) // Output series by series hash. + type seriesAndTimestamp struct { + Series + ts int64 + } + seriess := make(map[uint64]seriesAndTimestamp, biggestLen) // Output series by series hash. tempNumSamples := ev.currentSamples var ( @@ -1194,9 +1232,6 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Make the function call. enh.Ts = ts result, ws := funcCall(args, bufHelpers, enh) - if result.ContainsSameLabelset() { - ev.errorf("vector cannot contain metrics with the same labelset") - } enh.Out = result[:0] // Reuse result vector. warnings = append(warnings, ws...) @@ -1213,6 +1248,9 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // If this could be an instant query, shortcut so as not to change sort order. if ev.endTimestamp == ev.startTimestamp { + if result.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } mat := make(Matrix, len(result)) for i, s := range result { if s.H == nil { @@ -1230,8 +1268,13 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) for _, sample := range result { h := sample.Metric.Hash() ss, ok := seriess[h] - if !ok { - ss = Series{Metric: sample.Metric} + if ok { + if ss.ts == ts { // If we've seen this output series before at this timestamp, it's a duplicate. + ev.errorf("vector cannot contain metrics with the same labelset") + } + ss.ts = ts + } else { + ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} } if sample.H == nil { if ss.Floats == nil { @@ -1258,7 +1301,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Assemble the output matrix. By the time we get here we know we don't have too many samples. 
mat := make(Matrix, 0, len(seriess)) for _, ss := range seriess { - mat = append(mat, ss) + mat = append(mat, ss.Series) } ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1353,15 +1396,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - if vs.Timestamp != nil { - // This is a special case only for "timestamp" since the offset - // needs to be adjusted for every point. - vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond - } - val, ws := ev.vectorSelector(vs, enh.Ts) - return call([]parser.Value{val}, e.Args, enh), ws - }) + return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e) } } @@ -1799,38 +1834,48 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unhandled expression of type: %T", expr)) } -// vectorSelector evaluates a *parser.VectorSelector expression. -func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vector, storage.Warnings) { - ws, err := checkAndExpandSeriesSet(ev.ctx, node) +func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) { + ws, err := checkAndExpandSeriesSet(ev.ctx, vs) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } - vec := make(Vector, 0, len(node.Series)) - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) - var chkIter chunkenc.Iterator - for i, s := range node.Series { - chkIter = s.Iterator(chkIter) - it.Reset(chkIter) - t, f, h, ok := ev.vectorSelectorSingle(it, node, ts) - if ok { - vec = append(vec, Sample{ - Metric: node.Series[i].Labels(), - T: t, - F: f, - H: h, - }) + seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series)) + for i, s := range vs.Series { + it := s.Iterator(nil) + seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) + } - ev.currentSamples++ - ev.samplesStats.IncrementSamplesAtTimestamp(ts, 1) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + if vs.Timestamp != nil { + // This is a special case for "timestamp()" when the @ modifier is used, to ensure that + // we return a point for each time step in this case. + // See https://github.com/prometheus/prometheus/issues/8433. + vs.Offset = time.Duration(enh.Ts-*vs.Timestamp) * time.Millisecond } - } - ev.samplesStats.UpdatePeak(ev.currentSamples) - return vec, ws + vec := make(Vector, 0, len(vs.Series)) + for i, s := range vs.Series { + it := seriesIterators[i] + t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) + if ok { + vec = append(vec, Sample{ + Metric: s.Labels(), + T: t, + F: f, + H: h, + }) + + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + } + } + ev.samplesStats.UpdatePeak(ev.currentSamples) + return call([]parser.Value{vec}, e.Args, enh), ws + }) } // vectorSelectorSingle evaluates an instant vector for the iterator of one time series. 
@@ -1850,14 +1895,14 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no } case chunkenc.ValFloat: t, v = it.At() - case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: + case chunkenc.ValFloatHistogram: t, h = it.AtFloatHistogram() default: panic(fmt.Errorf("unknown value type %v", valueType)) } if valueType == chunkenc.ValNone || t > refTime { var ok bool - t, v, _, h, ok = it.PeekPrev() + t, v, h, ok = it.PeekPrev() if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { return 0, 0, nil, false } @@ -2263,14 +2308,11 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * insertedSigs[insertSig] = struct{}{} } - if (hl != nil && hr != nil) || (hl == nil && hr == nil) { - // Both lhs and rhs are of same type. - enh.Out = append(enh.Out, Sample{ - Metric: metric, - F: floatValue, - H: histogramValue, - }) - } + enh.Out = append(enh.Out, Sample{ + Metric: metric, + F: floatValue, + H: histogramValue, + }) } return enh.Out } @@ -2337,28 +2379,33 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { for _, lhsSample := range lhs { - lv, rv := lhsSample.F, rhs.V + lf, rf := lhsSample.F, rhs.V + var rh *histogram.FloatHistogram + lh := lhsSample.H // lhs always contains the Vector. If the original position was different // swap for calculating the value. if swap { - lv, rv = rv, lv + lf, rf = rf, lf + lh, rh = rh, lh } - value, _, keep := vectorElemBinop(op, lv, rv, nil, nil) + float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh) // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS. if op.IsComparisonOperator() && swap { - value = rv + float = rf + histogram = rh } if returnBool { if keep { - value = 1.0 + float = 1.0 } else { - value = 0.0 + float = 0.0 } keep = true } if keep { - lhsSample.F = value + lhsSample.F = float + lhsSample.H = histogram if shouldDropMetricName(op) || returnBool { lhsSample.Metric = enh.DropMetricName(lhsSample.Metric) } @@ -2413,16 +2460,33 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram // The histogram being added must have the larger schema // code (i.e. the higher resolution). if hrhs.Schema >= hlhs.Schema { - return 0, hlhs.Copy().Add(hrhs), true + return 0, hlhs.Copy().Add(hrhs).Compact(0), true } - return 0, hrhs.Copy().Add(hlhs), true + return 0, hrhs.Copy().Add(hlhs).Compact(0), true } return lhs + rhs, nil, true case parser.SUB: + if hlhs != nil && hrhs != nil { + // The histogram being subtracted must have the larger schema + // code (i.e. the higher resolution). 
+ if hrhs.Schema >= hlhs.Schema { + return 0, hlhs.Copy().Sub(hrhs).Compact(0), true + } + return 0, hrhs.Copy().Mul(-1).Add(hlhs).Compact(0), true + } return lhs - rhs, nil, true case parser.MUL: + if hlhs != nil && hrhs == nil { + return 0, hlhs.Copy().Mul(rhs), true + } + if hlhs == nil && hrhs != nil { + return 0, hrhs.Copy().Mul(lhs), true + } return lhs * rhs, nil, true case parser.DIV: + if hlhs != nil && hrhs == nil { + return 0, hlhs.Copy().Div(rhs), true + } return lhs / rhs, nil, true case parser.POW: return math.Pow(lhs, rhs), nil, true @@ -2452,7 +2516,8 @@ type groupedAggregation struct { labels labels.Labels floatValue float64 histogramValue *histogram.FloatHistogram - mean float64 + floatMean float64 + histogramMean *histogram.FloatHistogram groupCount int heap vectorByValueHeap reverseHeap vectorByReverseValueHeap @@ -2536,7 +2601,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without newAgg := &groupedAggregation{ labels: m, floatValue: s.F, - mean: s.F, + floatMean: s.F, groupCount: 1, } switch { @@ -2545,6 +2610,11 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case op == parser.SUM: newAgg.histogramValue = s.H.Copy() newAgg.hasHistogram = true + case op == parser.AVG: + newAgg.histogramMean = s.H.Copy() + newAgg.hasHistogram = true + case op == parser.STDVAR || op == parser.STDDEV: + newAgg.groupCount = 0 } result[groupingKey] = newAgg @@ -2589,9 +2659,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if s.H.Schema >= group.histogramValue.Schema { group.histogramValue.Add(s.H) } else { - h := s.H.Copy() - h.Add(group.histogramValue) - group.histogramValue = h + group.histogramValue = s.H.Copy().Add(group.histogramValue) } } // Otherwise the aggregation contained floats @@ -2604,25 +2672,46 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.AVG: group.groupCount++ - if math.IsInf(group.mean, 0) { - if math.IsInf(s.F, 0) && (group.mean > 0) == (s.F > 0) { - // The `mean` and `s.V` values are `Inf` of the same sign. They - // can't be subtracted, but the value of `mean` is correct - // already. - break + if s.H != nil { + group.hasHistogram = true + if group.histogramMean != nil { + left := s.H.Copy().Div(float64(group.groupCount)) + right := group.histogramMean.Copy().Div(float64(group.groupCount)) + // The histogram being added/subtracted must have + // an equal or larger schema. + if s.H.Schema >= group.histogramMean.Schema { + toAdd := right.Mul(-1).Add(left) + group.histogramMean.Add(toAdd) + } else { + toAdd := left.Sub(right) + group.histogramMean = toAdd.Add(group.histogramMean) + } } - if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { - // At this stage, the mean is an infinite. If the added - // value is neither an Inf or a Nan, we can keep that mean - // value. - // This is required because our calculation below removes - // the mean value, which would look like Inf += x - Inf and - // end up as a NaN. - break + // Otherwise the aggregation contained floats + // previously and will be invalid anyway. No + // point in copying the histogram in that case. + } else { + group.hasFloat = true + if math.IsInf(group.floatMean, 0) { + if math.IsInf(s.F, 0) && (group.floatMean > 0) == (s.F > 0) { + // The `floatMean` and `s.F` values are `Inf` of the same sign. They + // can't be subtracted, but the value of `floatMean` is correct + // already. 
+ break + } + if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { + // At this stage, the mean is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. + // This is required because our calculation below removes + // the mean value, which would look like Inf += x - Inf and + // end up as a NaN. + break + } } + // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. + group.floatMean += s.F/float64(group.groupCount) - group.floatMean/float64(group.groupCount) } - // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.mean += s.F/float64(group.groupCount) - group.mean/float64(group.groupCount) case parser.GROUP: // Do nothing. Required to avoid the panic in `default:` below. @@ -2641,10 +2730,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without group.groupCount++ case parser.STDVAR, parser.STDDEV: - group.groupCount++ - delta := s.F - group.mean - group.mean += delta / float64(group.groupCount) - group.floatValue += delta * (s.F - group.mean) + if s.H == nil { // Ignore native histograms. + group.groupCount++ + delta := s.F - group.floatMean + group.floatMean += delta / float64(group.groupCount) + group.floatValue += delta * (s.F - group.floatMean) + } case parser.TOPK: // We build a heap of up to k elements, with the smallest element at heap[0]. @@ -2696,7 +2787,16 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, aggr := range orderedResult { switch op { case parser.AVG: - aggr.floatValue = aggr.mean + if aggr.hasFloat && aggr.hasHistogram { + // We cannot aggregate histogram sample with a float64 sample. + // TODO(zenador): Issue warning when plumbing is in place. + continue + } + if aggr.hasHistogram { + aggr.histogramValue = aggr.histogramMean.Compact(0) + } else { + aggr.floatValue = aggr.floatMean + } case parser.COUNT, parser.COUNT_VALUES: aggr.floatValue = float64(aggr.groupCount) @@ -2739,8 +2839,12 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.SUM: if aggr.hasFloat && aggr.hasHistogram { // We cannot aggregate histogram sample with a float64 sample. + // TODO(zenador): Issue warning when plumbing is in place. continue } + if aggr.hasHistogram { + aggr.histogramValue.Compact(0) + } default: // For other aggregations, we already have the right value. } @@ -2781,7 +2885,7 @@ func btos(b bool) float64 { // result of the op operation. 
func shouldDropMetricName(op parser.ItemType) bool { switch op { - case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD: + case parser.ADD, parser.SUB, parser.DIV, parser.MUL, parser.POW, parser.MOD, parser.ATAN2: return true default: return false diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index df29d6c5d..5c39d6bd8 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -162,7 +162,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod if resultHistogram == nil { resultFloat *= factor } else { - resultHistogram.Scale(factor) + resultHistogram.Mul(factor) } return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}) @@ -443,15 +443,40 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) return append(enh.Out, Sample{F: aggrFn(el)}) } +func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector { + el := vals[0].(Matrix)[0] + + return append(enh.Out, Sample{H: aggrFn(el)}) +} + // === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. avg_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. + if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { + // TODO(zenador): Add warning for mixed floats and histograms. return enh.Out } + if len(vals[0].(Matrix)[0].Floats) == 0 { + // The passed values only contain histograms. + return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + count := 1 + mean := s.Histograms[0].H.Copy() + for _, h := range s.Histograms[1:] { + count++ + left := h.H.Copy().Div(float64(count)) + right := mean.Copy().Div(float64(count)) + // The histogram being added/subtracted must have + // an equal or larger schema. + if h.H.Schema >= mean.Schema { + toAdd := right.Mul(-1).Add(left) + mean.Add(toAdd) + } else { + toAdd := left.Sub(right) + mean = toAdd.Add(mean) + } + } + return mean + }) + } return aggrOverTime(vals, enh, func(s Series) float64 { var mean, count, c float64 for _, f := range s.Floats { @@ -558,13 +583,26 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. sum_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. + if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { + // TODO(zenador): Add warning for mixed floats and histograms. return enh.Out } + if len(vals[0].(Matrix)[0].Floats) == 0 { + // The passed values only contain histograms. + return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + sum := s.Histograms[0].H.Copy() + for _, h := range s.Histograms[1:] { + // The histogram being added must have + // an equal or larger schema. 
+ if h.H.Schema >= sum.Schema { + sum.Add(h.H) + } else { + sum = h.H.Copy().Add(sum) + } + } + return sum + }) + } return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 for _, f := range s.Floats { @@ -958,6 +996,72 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod return enh.Out } +// === histogram_stddev(Vector parser.ValueTypeVector) Vector === +func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + mean := sample.H.Sum / sample.H.Count + var variance, cVariance float64 + it := sample.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + var val float64 + if bucket.Lower <= 0 && 0 <= bucket.Upper { + val = 0 + } else { + val = math.Sqrt(bucket.Upper * bucket.Lower) + } + delta := val - mean + variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) + } + variance += cVariance + variance /= sample.H.Count + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: math.Sqrt(variance), + }) + } + return enh.Out +} + +// === histogram_stdvar(Vector parser.ValueTypeVector) Vector === +func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + mean := sample.H.Sum / sample.H.Count + var variance, cVariance float64 + it := sample.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + var val float64 + if bucket.Lower <= 0 && 0 <= bucket.Upper { + val = 0 + } else { + val = math.Sqrt(bucket.Upper * bucket.Lower) + } + delta := val - mean + variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) + } + variance += cVariance + variance /= sample.H.Count + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: variance, + }) + } + return enh.Out +} + // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { lower := vals[0].(Vector)[0].F @@ -1339,6 +1443,8 @@ var FunctionCalls = map[string]FunctionCall{ "histogram_fraction": funcHistogramFraction, "histogram_quantile": funcHistogramQuantile, "histogram_sum": funcHistogramSum, + "histogram_stddev": funcHistogramStdDev, + "histogram_stdvar": funcHistogramStdVar, "holt_winters": funcHoltWinters, "hour": funcHour, "idelta": funcIdelta, diff --git a/vendor/github.com/prometheus/prometheus/promql/fuzz.go b/vendor/github.com/prometheus/prometheus/promql/fuzz.go index 39933378e..aff6eb15b 100644 --- a/vendor/github.com/prometheus/prometheus/promql/fuzz.go +++ b/vendor/github.com/prometheus/prometheus/promql/fuzz.go @@ -58,7 +58,7 @@ const ( ) func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType) + p, warning := textparse.New(in, contentType, false) if warning != nil { // An invalid content type is being passed, which should not happen // in this context. 
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go index 450021328..45a30219e 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go @@ -173,6 +173,16 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "histogram_stddev": { + Name: "histogram_stddev", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_stdvar": { + Name: "histogram_stdvar", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "histogram_fraction": { Name: "histogram_fraction", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, @@ -387,7 +397,7 @@ var Functions = map[string]*Function{ } // getFunction returns a predefined Function object for the given name. -func getFunction(name string) (*Function, bool) { - function, ok := Functions[name] +func getFunction(name string, functions map[string]*Function) (*Function, bool) { + function, ok := functions[name] return function, ok } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index b1c604eec..f7951db2b 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -21,23 +21,28 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/model/histogram" ) %} %union { - node Node - item Item - matchers []*labels.Matcher - matcher *labels.Matcher - label labels.Label - labels labels.Labels - lblList []labels.Label - strings []string - series []SequenceValue - uint uint64 - float float64 - duration time.Duration + node Node + item Item + matchers []*labels.Matcher + matcher *labels.Matcher + label labels.Label + labels labels.Labels + lblList []labels.Label + strings []string + series []SequenceValue + histogram *histogram.FloatHistogram + descriptors map[string]interface{} + bucket_set []float64 + int int64 + uint uint64 + float float64 + duration time.Duration } @@ -54,6 +59,8 @@ IDENTIFIER LEFT_BRACE LEFT_BRACKET LEFT_PAREN +OPEN_HIST +CLOSE_HIST METRIC_IDENTIFIER NUMBER RIGHT_BRACE @@ -64,6 +71,20 @@ SPACE STRING TIMES +// Histogram Descriptors. +%token histogramDescStart +%token +SUM_DESC +COUNT_DESC +SCHEMA_DESC +OFFSET_DESC +NEGATIVE_OFFSET_DESC +BUCKETS_DESC +NEGATIVE_BUCKETS_DESC +ZERO_BUCKET_DESC +ZERO_BUCKET_WIDTH_DESC +%token histogramDescEnd + // Operators. %token operatorsStart %token @@ -145,6 +166,10 @@ START_METRIC_SELECTOR %type