diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 021c92d2f..a3f4287b3 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -12,7 +12,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: 1.20.x + go-version: 1.22.x - name: Restore cache uses: actions/cache@v3 with: @@ -29,7 +29,7 @@ jobs: test: strategy: matrix: - go-version: [1.20.x] + go-version: [1.22.x] platform: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.platform }} steps: @@ -58,7 +58,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: 1.20.x + go-version: 1.22.x - name: Generate Docs run: | make generate-docs @@ -78,7 +78,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: 1.20.x + go-version: 1.22.x - name: Restore cache uses: actions/cache@v3 with: @@ -128,7 +128,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: 1.20.x + go-version: 1.22.x - name: Restore cache uses: actions/cache@v3 with: @@ -187,7 +187,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: 1.20.x + go-version: 1.22.x - name: Restore cache uses: actions/cache@v3 with: @@ -198,6 +198,7 @@ jobs: - name: Run GoReleaser uses: goreleaser/goreleaser-action@v5 with: + version: v2.6.1 install-only: true - name: Validate Goreleaser run: make goreleaser GORELEASER_ARGS="--skip=validate --clean --snapshot" # snapshot is needed as local git has no tags diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b8618d8b1..186e4f3f4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -23,7 +23,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: 1.20.x + go-version: 1.22.x - name: Import GPG key id: import_gpg uses: fastly/ghaction-import-gpg@v2.1.0 @@ -33,7 +33,7 @@ jobs: - name: Run GoReleaser uses: goreleaser/goreleaser-action@v5 with: - version: v1.21.2 # Last version to support Go 1.20.x + version: v2.6.1 args: release --clean env: GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }} diff --git a/.goreleaser.yml b/.goreleaser.yml index 747b913d4..43220f554 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,3 +1,4 @@ +version: 2 before: hooks: - go mod tidy @@ -24,7 +25,7 @@ builds: goarch: '386' binary: '{{ .ProjectName }}_v{{ .Version }}' archives: -- format: zip +- formats: [zip] name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}' checksum: name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' @@ -44,4 +45,4 @@ signs: release: draft: true changelog: - skip: true + disable: true diff --git a/Makefile b/Makefile index 7e3a4611f..e1f27bd65 100644 --- a/Makefile +++ b/Makefile @@ -83,14 +83,13 @@ errcheck: @sh -c "'$(CURDIR)/scripts/errcheck.sh'" goreleaser-bin: - @# This is the last version of goreleaser that supports Go 1.20.14 (the version used to build the provider) - $(GO_BIN) install github.com/goreleaser/goreleaser@v1.21.2 + $(GO_BIN) install github.com/goreleaser/goreleaser/v2@latest nilaway: @nilaway ./... # You can pass flags to goreleaser via GORELEASER_ARGS -# --skip-validate will skip the checks +# --skip=validate will skip the checks # --clean will save you deleting the dist dir # --single-target will be quicker and only build for your os & architecture # e.g. 
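As an illustration of the `GORELEASER_ARGS` pass-through described in the Makefile comments above, a local snapshot build could be invoked as follows; this exact flag combination is a sketch assembled from the flags shown elsewhere in this diff (the CI target uses `--skip=validate --clean --snapshot`, and `--single-target` is documented in the Makefile), not a command taken verbatim from the repository:

```sh
# Build a local snapshot without validation (a fresh clone has no tags),
# restricted to the current OS/architecture for speed.
make goreleaser GORELEASER_ARGS="--skip=validate --clean --snapshot --single-target"
```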
diff --git a/README.md b/README.md index c8af81561..a70d45aeb 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ ## Requirements - [Terraform](https://www.terraform.io/downloads.html) 0.12.x or higher -- [Go](https://golang.org/doc/install) 1.20 (to build the provider plugin) +- [Go](https://golang.org/doc/install) 1.22 (to build the provider plugin) > NOTE: the last version of the Fastly provider to support Terraform 0.11.x and below was [v0.26.0](https://github.com/fastly/terraform-provider-fastly/releases/tag/v0.26.0) @@ -30,7 +30,7 @@ $ make build ## Developing the Provider -If you wish to work on the provider, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.20+ is *required*). +If you wish to work on the provider, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.22+ is *required*). To compile the provider, run `make build`. This will build the provider and put the provider binary in a local `bin` directory. diff --git a/docs/resources/domain_v1.md b/docs/resources/domain_v1.md new file mode 100644 index 000000000..f890379e4 --- /dev/null +++ b/docs/resources/domain_v1.md @@ -0,0 +1,29 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fastly_domain_v1 Resource - terraform-provider-fastly" +subcategory: "" +description: |- + +--- + +# fastly_domain_v1 (Resource) + + + + + + +## Schema + +### Required + +- `fqdn` (String) The fully-qualified domain name for your domain (e.g. `www.example.com`, no trailing dot). Can be created, but not updated. + +### Optional + +- `service_id` (String) The service_id associated with your domain or null if there is no association. + +### Read-Only + +- `domain_id` (String) The Domain Identifier (UUID). +- `id` (String) The ID of this resource. diff --git a/fastly/block_fastly_service_package.go b/fastly/block_fastly_service_package.go index ff3baca51..edd66e119 100644 --- a/fastly/block_fastly_service_package.go +++ b/fastly/block_fastly_service_package.go @@ -47,10 +47,10 @@ func (h *PackageServiceAttributeHandler) Register(s *schema.Resource) error { ExactlyOneOf: []string{"package.0.content", "package.0.filename"}, }, "source_code_hash": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Used to trigger updates. Must be set to a SHA512 hash of all files (in sorted order) within the package. The usual way to set this is using the fastly_package_hash data source.", + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Used to trigger updates. Must be set to a SHA512 hash of all files (in sorted order) within the package. 
The usual way to set this is using the fastly_package_hash data source.", }, }, }, diff --git a/fastly/provider.go b/fastly/provider.go index f2c2407d4..df3acd301 100644 --- a/fastly/provider.go +++ b/fastly/provider.go @@ -73,6 +73,7 @@ func Provider() *schema.Provider { "fastly_configstore": resourceFastlyConfigStore(), "fastly_configstore_entries": resourceFastlyConfigStoreEntries(), "fastly_custom_dashboard": resourceFastlyCustomDashboard(), + "fastly_domain_v1": resourceFastlyDomainV1(), "fastly_integration": resourceFastlyIntegration(), "fastly_kvstore": resourceFastlyKVStore(), "fastly_secretstore": resourceFastlySecretStore(), diff --git a/fastly/resource_fastly_domain_v1.go b/fastly/resource_fastly_domain_v1.go new file mode 100644 index 000000000..a0071b574 --- /dev/null +++ b/fastly/resource_fastly_domain_v1.go @@ -0,0 +1,122 @@ +package fastly + +import ( + "context" + "log" + + gofastly "github.com/fastly/go-fastly/v9/fastly" + v1 "github.com/fastly/go-fastly/v9/fastly/domains/v1" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceFastlyDomainV1() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceFastlyDomainV1Create, + ReadContext: resourceFastlyDomainV1Read, + UpdateContext: resourceFastlyDomainV1Update, + DeleteContext: resourceFastlyDomainV1Delete, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + "domain_id": { + Type: schema.TypeString, + Computed: true, + Description: "The Domain Identifier (UUID).", + }, + "fqdn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The fully-qualified domain name for your domain (e.g. `www.example.com`, no trailing dot). 
Can be created, but not updated.", + }, + "service_id": { + Type: schema.TypeString, + Optional: true, + Description: "The service_id associated with your domain or null if there is no association.", + }, + }, + } +} + +func resourceFastlyDomainV1Create(_ context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + conn := meta.(*APIClient).conn + + var input v1.CreateInput + if v, ok := d.GetOk("fqdn"); ok { + input.FQDN = gofastly.ToPointer(v.(string)) + } + if v, ok := d.GetOk("service_id"); ok { + input.ServiceID = gofastly.ToPointer(v.(string)) + } + + data, err := v1.Create(conn, &input) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(data.DomainID) + + if err := d.Set("domain_id", data.DomainID); err != nil { + return diag.FromErr(err) + } + return nil +} + +func resourceFastlyDomainV1Read(_ context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + log.Printf("[DEBUG] Refreshing Domain V1 Configuration for (%s)", d.Id()) + conn := meta.(*APIClient).conn + + input := &v1.GetInput{ + DomainID: gofastly.ToPointer(d.Id()), + } + + data, err := v1.Get(conn, input) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("domain_id", data.DomainID); err != nil { + return diag.FromErr(err) + } + if err := d.Set("fqdn", data.FQDN); err != nil { + return diag.FromErr(err) + } + if err := d.Set("service_id", data.ServiceID); err != nil { + return diag.FromErr(err) + } + + return nil +} + +func resourceFastlyDomainV1Update(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + conn := meta.(*APIClient).conn + + input := &v1.UpdateInput{ + DomainID: gofastly.ToPointer(d.Id()), + } + if v, ok := d.GetOk("service_id"); ok { + input.ServiceID = gofastly.ToPointer(v.(string)) + } + + _, err := v1.Update(conn, input) + if err != nil { + return diag.FromErr(err) + } + + return resourceFastlyDomainV1Read(ctx, d, meta) +} + +func resourceFastlyDomainV1Delete(_ context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + conn := meta.(*APIClient).conn + input := &v1.DeleteInput{ + DomainID: gofastly.ToPointer(d.Id()), + } + err := v1.Delete(conn, input) + if err != nil { + return diag.FromErr(err) + } + return nil +} diff --git a/fastly/resource_fastly_domain_v1_test.go b/fastly/resource_fastly_domain_v1_test.go new file mode 100644 index 000000000..1c04e3e5d --- /dev/null +++ b/fastly/resource_fastly_domain_v1_test.go @@ -0,0 +1,87 @@ +package fastly + +import ( + "fmt" + "testing" + + gofastly "github.com/fastly/go-fastly/v9/fastly" + v1 "github.com/fastly/go-fastly/v9/fastly/domains/v1" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccFastlyDomainV1_Basic(t *testing.T) { + domainName := fmt.Sprintf("fastly-test.tf-%s.com", acctest.RandString(10)) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + ProviderFactories: testAccProviders, + CheckDestroy: testAccCheckDomainV1Destroy, + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf(` + resource "fastly_domain_v1" "example" { + fqdn = "%s" + } + `, domainName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("fastly_domain_v1.example", "fqdn", domainName), + resource.TestCheckNoResourceAttr("fastly_domain_v1.example", "service_id"), + ), + }, + { + Config: fmt.Sprintf(` + resource "fastly_service_vcl" "example" { + name = "%s" + domain { + name = 
"%s" + } + force_destroy = true + } + resource "fastly_domain_v1" "example" { + fqdn = "%s" + service_id = resource.fastly_service_vcl.example.id + } + `, domainName, domainName, domainName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("fastly_domain_v1.example", "fqdn", domainName), + resource.TestCheckResourceAttrSet("fastly_domain_v1.example", "service_id"), + ), + }, + { + ResourceName: "fastly_domain_v1.example", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckDomainV1Destroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "fastly_domain_v1" { + continue + } + a := rs.Primary.Attributes + fqdn := a["fqdn"] + conn := testAccProvider.Meta().(*APIClient).conn + input := &v1.ListInput{ + FQDN: gofastly.ToPointer(fqdn), + } + cl, err := v1.List(conn, input) + if err != nil { + return fmt.Errorf("failed to list domains for fastly_domain_v1 resource: %w", err) + } + if cl != nil && len(cl.Data) > 0 { + for _, d := range cl.Data { + if d.FQDN == fqdn { + return fmt.Errorf("tried deleting domain (%s), but was still found", fqdn) + } + } + } + return nil + } + return nil +} diff --git a/go.mod b/go.mod index 9ff1cf248..843b2da11 100644 --- a/go.mod +++ b/go.mod @@ -1,26 +1,28 @@ module github.com/fastly/terraform-provider-fastly -go 1.20 +go 1.22.0 + +toolchain go1.22.11 require ( github.com/bflad/tfproviderlint v0.30.0 - github.com/fastly/go-fastly/v9 v9.12.0 + github.com/fastly/go-fastly/v9 v9.13.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-docs v0.19.4 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 github.com/stretchr/testify v1.10.0 - golang.org/x/net v0.31.0 + golang.org/x/net v0.34.0 ) require ( - github.com/BurntSushi/toml v1.2.1 // indirect + github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect github.com/Kunde21/markdownfmt/v3 v3.1.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect - github.com/agext/levenshtein v1.2.3 // indirect + github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/bflad/gopaniccheck v0.1.0 // indirect @@ -28,6 +30,7 @@ require ( github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dnaeon/go-vcr v1.2.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -59,10 +62,10 @@ require ( github.com/mattn/go-runewidth v0.0.9 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/oklog/run v1.1.0 // indirect + github.com/oklog/run v1.0.0 // indirect github.com/peterhellberg/link v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/posener/complete v1.2.3 // indirect @@ -75,17 +78,17 @@ require ( github.com/yuin/goldmark-meta v1.1.0 // indirect 
github.com/zclconf/go-cty v1.14.4 // indirect go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect - golang.org/x/crypto v0.29.0 // indirect + golang.org/x/crypto v0.32.0 // indirect golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/sync v0.9.0 // indirect - golang.org/x/sys v0.27.0 // indirect - golang.org/x/text v0.20.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.29.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/grpc v1.63.2 // indirect google.golang.org/protobuf v1.34.0 // indirect - gopkg.in/yaml.v2 v2.3.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 0b5c92ec6..b9b0cf531 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,7 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/Kunde21/markdownfmt/v3 v3.1.0 h1:KiZu9LKs+wFFBQKhrZJrFZwtLnCCWJahL+S+E/3VnM0= github.com/Kunde21/markdownfmt/v3 v3.1.0/go.mod h1:tPXN1RTyOzJwhfHoon9wUr4HGYmWgVxSQN6VBJDkrVc= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -10,10 +11,11 @@ github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYr github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= -github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= -github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= +github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= @@ -28,25 +30,35 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= 
github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= -github.com/fastly/go-fastly/v9 v9.12.0 h1:NUR4l+3LrSCux91sgKduFV8d/eejoGpQNlHa0ftKrFo= -github.com/fastly/go-fastly/v9 v9.12.0/go.mod h1:5w2jgJBZqQEebOwM/rRg7wutAcpDTziiMYWb/6qdM7U= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fastly/go-fastly/v9 v9.13.0 h1:/YpMLLquAruW2FZq01kY/VX5kouFCRMexwYfJbN/IWQ= +github.com/fastly/go-fastly/v9 v9.13.0/go.mod h1:oHzPMQCh93xygivG2qnri3XUdH7KDSUbAMv0+khzz3M= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -118,13 +130,18 @@ github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 
h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -141,28 +158,33 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= -github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/peterhellberg/link v1.2.0 h1:UA5pg3Gp/E0F2WdX7GERiNrPQrM1K6CVJUUWfHa4t6c= github.com/peterhellberg/link v1.2.0/go.mod h1:gYfAh+oJgQu2SrZHg5hROVRQe1ICoK0/HHJTcE0edxc= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= @@ -181,6 +203,7 @@ github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.7.1 h1:3bajkSilaCbjdKVsKdZjZCLBNPL9pYzrCakKaf4U49U= github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= @@ -189,32 +212,33 @@ github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBv github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.abhg.dev/goldmark/frontmatter v0.2.0 h1:P8kPG0YkL12+aYk2yU3xHv4tcXzeVnN+gU0tJ5JnxRw= go.abhg.dev/goldmark/frontmatter v0.2.0/go.mod h1:XqrEkZuM57djk7zrlRUB02x8I5J0px76YjkOzhB4YlU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df 
h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -229,26 +253,27 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.28.0 
h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200214201135-548b770e2dfa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -267,8 +292,11 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 3651cfa96..639e6c399 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -9,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. `git show v0.4.0`). 
-This library requires Go 1.13 or newer; add it to your go.mod with: +This library requires Go 1.18 or newer; add it to your go.mod with: % go get github.com/BurntSushi/toml@latest diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 0ca1dc4fe..c05a0b7e5 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -6,7 +6,7 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" + "io/fs" "math" "os" "reflect" @@ -18,13 +18,13 @@ import ( // Unmarshaler is the interface implemented by objects that can unmarshal a // TOML description of themselves. type Unmarshaler interface { - UnmarshalTOML(interface{}) error + UnmarshalTOML(any) error } // Unmarshal decodes the contents of data in TOML format into a pointer v. // // See [Decoder] for a description of the decoding process. -func Unmarshal(data []byte, v interface{}) error { +func Unmarshal(data []byte, v any) error { _, err := NewDecoder(bytes.NewReader(data)).Decode(v) return err } @@ -32,12 +32,12 @@ func Unmarshal(data []byte, v interface{}) error { // Decode the TOML data in to the pointer v. // // See [Decoder] for a description of the decoding process. -func Decode(data string, v interface{}) (MetaData, error) { +func Decode(data string, v any) (MetaData, error) { return NewDecoder(strings.NewReader(data)).Decode(v) } // DecodeFile reads the contents of a file and decodes it with [Decode]. -func DecodeFile(path string, v interface{}) (MetaData, error) { +func DecodeFile(path string, v any) (MetaData, error) { fp, err := os.Open(path) if err != nil { return MetaData{}, err @@ -46,6 +46,17 @@ func DecodeFile(path string, v interface{}) (MetaData, error) { return NewDecoder(fp).Decode(v) } +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + // Primitive is a TOML value that hasn't been decoded into a Go value. // // This type can be used for any value, which will cause decoding to be delayed. @@ -58,7 +69,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) { // overhead of reflection. They can be useful when you don't know the exact type // of TOML data until runtime. type Primitive struct { - undecoded interface{} + undecoded any context Key } @@ -91,7 +102,7 @@ const ( // UnmarshalText method. See the Unmarshaler example for a demonstration with // email addresses. // -// ### Key mapping +// # Key mapping // // TOML keys can map to either keys in a Go map or field names in a Go struct. // The special `toml` struct tag can be used to map TOML keys to struct fields @@ -122,7 +133,7 @@ var ( ) // Decode TOML data in to the pointer `v`. -func (dec *Decoder) Decode(v interface{}) (MetaData, error) { +func (dec *Decoder) Decode(v any) (MetaData, error) { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr { s := "%q" @@ -136,8 +147,8 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) } - // Check if this is a supported type: struct, map, interface{}, or something - // that implements UnmarshalTOML or UnmarshalText. + // Check if this is a supported type: struct, map, any, or something that + // implements UnmarshalTOML or UnmarshalText. 
rv = indirect(rv) rt := rv.Type() if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && @@ -148,7 +159,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { // TODO: parser should read from io.Reader? Or at the very least, make it // read from []byte rather than string - data, err := ioutil.ReadAll(dec.r) + data, err := io.ReadAll(dec.r) if err != nil { return MetaData{}, err } @@ -179,7 +190,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { // will only reflect keys that were decoded. Namely, any keys hidden behind a // Primitive will be considered undecoded. Executing this method will update the // undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { +func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { md.context = primValue.context defer func() { md.context = nil }() return md.unify(primValue.undecoded, rvalue(v)) @@ -190,7 +201,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { // // Any type mismatch produces an error. Finding a type that we don't know // how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { +func (md *MetaData) unify(data any, rv reflect.Value) error { // Special case. Look for a `Primitive` value. // TODO: #76 would make this superfluous after implemented. if rv.Type() == primitiveType { @@ -207,7 +218,11 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { rvi := rv.Interface() if v, ok := rvi.(Unmarshaler); ok { - return v.UnmarshalTOML(data) + err := v.UnmarshalTOML(data) + if err != nil { + return md.parseErr(err) + } + return nil } if v, ok := rvi.(encoding.TextUnmarshaler); ok { return md.unifyText(data, v) @@ -227,14 +242,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return md.unifyInt(data, rv) } switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil case reflect.Struct: return md.unifyStruct(data, rv) case reflect.Map: @@ -248,7 +255,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: - if rv.NumMethod() > 0 { // Only support empty interfaces are supported. + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. 
return md.e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) @@ -258,14 +265,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return md.e("unsupported type %s", rv.Kind()) } -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) +func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error { + tmap, ok := mapping.(map[string]any) if !ok { if mapping == nil { return nil } - return md.e("type mismatch for %s: expected table but found %T", - rv.Type().String(), mapping) + return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping)) } for key, datum := range tmap { @@ -304,14 +310,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { return nil } -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { +func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error { keyType := rv.Type().Key().Kind() if keyType != reflect.String && keyType != reflect.Interface { return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", keyType, rv.Type()) } - tmap, ok := mapping.(map[string]interface{}) + tmap, ok := mapping.(map[string]any) if !ok { if tmap == nil { return nil @@ -347,7 +353,7 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { return nil } -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyArray(data any, rv reflect.Value) error { datav := reflect.ValueOf(data) if datav.Kind() != reflect.Slice { if !datav.IsValid() { @@ -361,7 +367,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { return md.unifySliceArray(datav, rv) } -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifySlice(data any, rv reflect.Value) error { datav := reflect.ValueOf(data) if datav.Kind() != reflect.Slice { if !datav.IsValid() { @@ -388,7 +394,7 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { return nil } -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyString(data any, rv reflect.Value) error { _, ok := rv.Interface().(json.Number) if ok { if i, ok := data.(int64); ok { @@ -408,7 +414,7 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { return md.badtype("string", data) } -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error { rvk := rv.Kind() if num, ok := data.(float64); ok { @@ -429,7 +435,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { if num, ok := data.(int64); ok { if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) + return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()}) } rv.SetFloat(float64(num)) return nil @@ -438,7 +444,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { return md.badtype("float", data) } -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyInt(data any, rv reflect.Value) error { _, ok := rv.Interface().(time.Duration) if ok { // Parse as string duration, and fall back to regular integer parsing @@ 
-481,7 +487,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { return nil } -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyBool(data any, rv reflect.Value) error { if b, ok := data.(bool); ok { rv.SetBool(b) return nil @@ -489,12 +495,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { return md.badtype("boolean", data) } -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyAnything(data any, rv reflect.Value) error { rv.Set(reflect.ValueOf(data)) return nil } -func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { +func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error { var s string switch sdata := data.(type) { case Marshaler: @@ -523,27 +529,29 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro return md.badtype("primitive (string-like)", data) } if err := v.UnmarshalText([]byte(s)); err != nil { - return err + return md.parseErr(err) } return nil } -func (md *MetaData) badtype(dst string, data interface{}) error { - return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +func (md *MetaData) badtype(dst string, data any) error { + return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst) } func (md *MetaData) parseErr(err error) error { k := md.context.String() + d := string(md.data) return ParseError{ + Message: err.Error(), + err: err, LastKey: k, - Position: md.keyInfo[k].pos, + Position: md.keyInfo[k].pos.withCol(d), Line: md.keyInfo[k].pos.Line, - err: err, - input: string(md.data), + input: d, } } -func (md *MetaData) e(format string, args ...interface{}) error { +func (md *MetaData) e(format string, args ...any) error { f := "toml: " if len(md.context) > 0 { f = fmt.Sprintf("toml: (last key %q): ", md.context) @@ -556,7 +564,7 @@ func (md *MetaData) e(format string, args ...interface{}) error { } // rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { +func rvalue(v any) reflect.Value { return indirect(reflect.ValueOf(v)) } @@ -600,3 +608,8 @@ func isUnifiable(rv reflect.Value) bool { } return false } + +// fmt %T with "interface {}" replaced with "any", which is far more readable. +func fmtType(t any) string { + return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any") +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go deleted file mode 100644 index 086d0b686..000000000 --- a/vendor/github.com/BurntSushi/toml/decode_go116.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package toml - -import ( - "io/fs" -) - -// DecodeFS reads the contents of a file from [fs.FS] and decodes it with -// [Decode]. -func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { - fp, err := fsys.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) -} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go index c6af3f239..155709a80 100644 --- a/vendor/github.com/BurntSushi/toml/deprecated.go +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -5,17 +5,25 @@ import ( "io" ) +// TextMarshaler is an alias for encoding.TextMarshaler. 
+// // Deprecated: use encoding.TextMarshaler type TextMarshaler encoding.TextMarshaler +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. +// // Deprecated: use encoding.TextUnmarshaler type TextUnmarshaler encoding.TextUnmarshaler +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) } + +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// // Deprecated: use MetaData.PrimitiveDecode. -func PrimitiveDecode(primValue Primitive, v interface{}) error { +func PrimitiveDecode(primValue Primitive, v any) error { md := MetaData{decoded: make(map[string]struct{})} return md.unify(primValue.undecoded, rvalue(v)) } - -// Deprecated: use NewDecoder(reader).Decode(&value). -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go index 81a7c0fe9..82c90a905 100644 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -2,9 +2,6 @@ // // This package supports TOML v1.0.0, as specified at https://toml.io // -// There is also support for delaying decoding with the Primitive type, and -// querying the set of keys in a TOML document with the MetaData type. -// // The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, // and can be used to verify if TOML document is valid. It can also be used to // print the type of each key. diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 930e1d521..73366c0d9 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -2,6 +2,7 @@ package toml import ( "bufio" + "bytes" "encoding" "encoding/json" "errors" @@ -76,6 +77,17 @@ type Marshaler interface { MarshalTOML() ([]byte, error) } +// Marshal returns a TOML representation of the Go value. +// +// See [Encoder] for a description of the encoding process. +func Marshal(v any) ([]byte, error) { + buff := new(bytes.Buffer) + if err := NewEncoder(buff).Encode(v); err != nil { + return nil, err + } + return buff.Bytes(), nil +} + // Encoder encodes a Go to a TOML document. // // The mapping between Go values and TOML values should be precisely the same as @@ -115,28 +127,24 @@ type Marshaler interface { // NOTE: only exported keys are encoded due to the use of reflection. Unexported // keys are silently discarded. type Encoder struct { - // String to use for a single indentation level; default is two spaces. - Indent string - + Indent string // string for a single indentation level; default is two spaces. + hasWritten bool // written any output to w yet? w *bufio.Writer - hasWritten bool // written any output to w yet? } // NewEncoder create a new Encoder. func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } + return &Encoder{w: bufio.NewWriter(w), Indent: " "} } // Encode writes a TOML representation of the Go value to the [Encoder]'s writer. // // An error is returned if the value given cannot be encoded to a valid TOML // document. 
-func (enc *Encoder) Encode(v interface{}) error { +func (enc *Encoder) Encode(v any) error { rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { return err } return enc.w.Flush() @@ -279,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Float32: f := rv.Float() if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } enc.wf("nan") } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") } else { enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) } case reflect.Float64: f := rv.Float() if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } enc.wf("nan") } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") } else { enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) } @@ -303,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Interface: enc.eElement(rv.Elem()) default: - encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) + encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface()))) } } @@ -457,6 +477,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) + if is32Bit { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + } + // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. // @@ -471,17 +501,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if typeIsTable(tomlTypeOfGo(frv)) { fieldsSub = append(fieldsSub, append(start, f.Index...)) } else { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - if is32Bit { - copyStart := make([]int, len(start)) - copy(copyStart, start) - fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) } } } @@ -490,24 +510,27 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { writeFields := func(fields [][]int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) - fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) + fieldVal := rv.FieldByIndex(fieldIndex) - if isNil(fieldVal) { /// Don't write anything for nil fields. + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { continue } - opts := getOptions(fieldType.Tag) - if opts.skip { + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. 
continue } + keyName := fieldType.Name if opts.name != "" { keyName = opts.name } - if opts.omitempty && enc.isEmpty(fieldVal) { - continue - } if opts.omitzero && isZero(fieldVal) { continue } @@ -649,7 +672,7 @@ func isZero(rv reflect.Value) bool { return false } -func (enc *Encoder) isEmpty(rv reflect.Value) bool { +func isEmpty(rv reflect.Value) bool { switch rv.Kind() { case reflect.Array, reflect.Slice, reflect.Map, reflect.String: return rv.Len() == 0 @@ -664,13 +687,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool { // type b struct{ s []string } // s := a{field: b{s: []string{"AAA"}}} for i := 0; i < rv.NumField(); i++ { - if !enc.isEmpty(rv.Field(i)) { + if !isEmpty(rv.Field(i)) { return false } } return true case reflect.Bool: return !rv.Bool() + case reflect.Ptr: + return rv.IsNil() } return false } @@ -693,8 +718,11 @@ func (enc *Encoder) newline() { // v v v v vv // key = {k = 1, k2 = 2} func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + /// Marshaler used on top-level document; call eElement() to just call + /// Marshal{TOML,Text}. if len(key) == 0 { - encPanic(errNoKey) + enc.eElement(val) + return } enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) @@ -703,7 +731,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { } } -func (enc *Encoder) wf(format string, v ...interface{}) { +func (enc *Encoder) wf(format string, v ...any) { _, err := fmt.Fprintf(enc.w, format, v...) if err != nil { encPanic(err) diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index f4f390e64..1dd523211 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -67,24 +67,39 @@ type ParseError struct { // Position of an error. type Position struct { Line int // Line number, starting at 1. + Col int // Error column, starting at 1. Start int // Start of error, as byte offset starting at 0. - Len int // Lenght in bytes. + Len int // Lenght of the error in bytes. } -func (pe ParseError) Error() string { - msg := pe.Message - if msg == "" { // Error from errorf() - msg = pe.err.Error() +func (p Position) withCol(tomlFile string) Position { + var ( + pos int + lines = strings.Split(tomlFile, "\n") + ) + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= p.Start { + p.Col = p.Start - pos + 1 + if p.Col < 1 { // Should never happen, but just in case. + p.Col = 1 + } + break + } + pos += ll } + return p +} +func (pe ParseError) Error() string { if pe.LastKey == "" { - return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message) } return fmt.Sprintf("toml: line %d (last key %q): %s", - pe.Position.Line, pe.LastKey, msg) + pe.Position.Line, pe.LastKey, pe.Message) } -// ErrorWithUsage() returns the error with detailed location context. +// ErrorWithPosition returns the error with detailed location context. // // See the documentation on [ParseError]. func (pe ParseError) ErrorWithPosition() string { @@ -92,39 +107,41 @@ func (pe ParseError) ErrorWithPosition() string { return pe.Error() } + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + var ( lines = strings.Split(pe.input, "\n") - col = pe.column(lines) b = new(strings.Builder) ) - - msg := pe.Message - if msg == "" { - msg = pe.err.Error() - } - - // TODO: don't show control characters as literals? 
This may not show up - // well everywhere. - if pe.Position.Len == 1 { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", - msg, pe.Position.Line, col+1) + pe.Message, pe.Position.Line, pe.Position.Col) } else { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", - msg, pe.Position.Line, col, col+pe.Position.Len) + pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1) } if pe.Position.Line > 2 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) } if pe.Position.Line > 1 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2])) } - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + + /// Expand tabs, so that the ^^^s are at the correct position, but leave + /// "column 10-13" intact. Adjusting this to the visual column would be + /// better, but we don't know the tabsize of the user in their editor, which + /// can be 8, 4, 2, or something else. We can't know. So leaving it as the + /// character index is probably the "most correct". + expanded := expandTab(lines[pe.Position.Line-1]) + diff := len(expanded) - len(lines[pe.Position.Line-1]) + + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len)) return b.String() } -// ErrorWithUsage() returns the error with detailed location context and usage +// ErrorWithUsage returns the error with detailed location context and usage // guidance. // // See the documentation on [ParseError]. @@ -142,34 +159,47 @@ func (pe ParseError) ErrorWithUsage() string { return m } -func (pe ParseError) column(lines []string) int { - var pos, col int - for i := range lines { - ll := len(lines[i]) + 1 // +1 for the removed newline - if pos+ll >= pe.Position.Start { - col = pe.Position.Start - pos - if col < 0 { // Should never happen, but just in case. - col = 0 +func expandTab(s string) string { + var ( + b strings.Builder + l int + fill = func(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = ' ' } - break + return string(b) + } + ) + b.Grow(len(s)) + for _, r := range s { + switch r { + case '\t': + tw := 8 - l%8 + b.WriteString(fill(tw)) + l += tw + default: + b.WriteRune(r) + l += 1 } - pos += ll } - - return col + return b.String() } type ( errLexControl struct{ r rune } errLexEscape struct{ r rune } errLexUTF8 struct{ b byte } - errLexInvalidNum struct{ v string } - errLexInvalidDate struct{ v string } + errParseDate struct{ v string } errLexInlineTableNL struct{} errLexStringNL struct{} errParseRange struct { - i interface{} // int or float - size string // "int64", "uint16", etc. + i any // int or float + size string // "int64", "uint16", etc. 
+ } + errUnsafeFloat struct { + i interface{} // float32 or float64 + size string // "float32" or "float64" } errParseDuration struct{ d string } ) @@ -183,18 +213,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape func (e errLexEscape) Usage() string { return usageEscape } func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } func (e errLexUTF8) Usage() string { return "" } -func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } -func (e errLexInvalidNum) Usage() string { return "" } -func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } -func (e errLexInvalidDate) Usage() string { return "" } +func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) } +func (e errParseDate) Usage() string { return usageDate } func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } func (e errLexStringNL) Usage() string { return usageStringNewline } func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } func (e errParseRange) Usage() string { return usageIntOverflow } -func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } -func (e errParseDuration) Usage() string { return usageDuration } +func (e errUnsafeFloat) Error() string { + return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size) +} +func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } const usageEscape = ` A '\' inside a "-delimited string is interpreted as an escape character. @@ -251,19 +283,35 @@ bug in the program that uses too small of an integer. The maximum and minimum values are: size β”‚ lowest β”‚ highest - ───────┼────────────────┼────────── + ───────┼────────────────┼────────────── int8 β”‚ -128 β”‚ 127 int16 β”‚ -32,768 β”‚ 32,767 int32 β”‚ -2,147,483,648 β”‚ 2,147,483,647 int64 β”‚ -9.2 Γ— 10¹⁷ β”‚ 9.2 Γ— 10¹⁷ uint8 β”‚ 0 β”‚ 255 - uint16 β”‚ 0 β”‚ 65535 - uint32 β”‚ 0 β”‚ 4294967295 + uint16 β”‚ 0 β”‚ 65,535 + uint32 β”‚ 0 β”‚ 4,294,967,295 uint64 β”‚ 0 β”‚ 1.8 Γ— 10¹⁸ int refers to int32 on 32-bit systems and int64 on 64-bit systems. ` +const usageUnsafeFloat = ` +This number is outside of the "safe" range for floating point numbers; whole +(non-fractional) numbers outside the below range can not always be represented +accurately in a float, leading to some loss of accuracy. + +Explicitly mark a number as a fractional unit by adding ".0", which will incur +some loss of accuracy; for example: + + f = 2_000_000_000.0 + +Accuracy ranges: + + float32 = 16,777,215 + float64 = 9,007,199,254,740,991 +` + const usageDuration = ` A duration must be as "number", without any spaces. Valid units are: @@ -277,3 +325,23 @@ A duration must be as "number", without any spaces. Valid units are: You can combine multiple units; for example "5m10s" for 5 minutes and 10 seconds. ` + +const usageDate = ` +A TOML datetime must be in one of the following formats: + + 2006-01-02T15:04:05Z07:00 Date and time, with timezone. + 2006-01-02T15:04:05 Date and time, but without timezone. + 2006-01-02 Date without a time or timezone. 
+ 15:04:05 Just a time, without any timezone. + +Seconds may optionally have a fraction, up to nanosecond precision: + + 15:04:05.123 + 15:04:05.856018510 +` + +// TOML 1.1: +// The seconds part in times is optional, and may be omitted: +// 2006-01-02T15:04Z07:00 +// 2006-01-02T15:04 +// 15:04 diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index d4d70871d..6878d9d69 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -17,6 +17,7 @@ const ( itemEOF itemText itemString + itemStringEsc itemRawString itemMultilineString itemRawMultilineString @@ -46,12 +47,14 @@ func (p Position) String() string { } type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool + esc bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). @@ -87,13 +90,14 @@ func (lx *lexer) nextItem() item { } } -func lex(input string) *lexer { +func lex(input string, tomlNext bool) *lexer { lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, } return lx } @@ -162,7 +166,7 @@ func (lx *lexer) next() (r rune) { } r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) - if r == utf8.RuneError { + if r == utf8.RuneError && w == 1 { lx.error(errLexUTF8{lx.input[lx.pos]}) return utf8.RuneError } @@ -268,7 +272,7 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { } // errorf is like error, and creates a new error. -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { +func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() pos.Line-- @@ -331,9 +335,7 @@ func lexTopEnd(lx *lexer) stateFn { lx.emit(itemEOF) return nil } - return lx.errorf( - "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", - r) + return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r) } // lexTable lexes the beginning of a table. Namely, it makes sure that @@ -408,7 +410,7 @@ func lexTableNameEnd(lx *lexer) stateFn { // Lexes only one part, e.g. only 'a' inside 'a.b'. func lexBareName(lx *lexer) stateFn { r := lx.next() - if isBareKeyChar(r) { + if isBareKeyChar(r, lx.tomlNext) { return lexBareName } lx.backup() @@ -490,6 +492,9 @@ func lexKeyEnd(lx *lexer) stateFn { lx.emit(itemKeyEnd) return lexSkip(lx, lexValue) default: + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r)) + } return lx.errorf("expected '.' 
or '=', but got %q instead", r) } } @@ -558,6 +563,9 @@ func lexValue(lx *lexer) stateFn { if r == eof { return lx.errorf("unexpected EOF; expected value") } + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r)) + } return lx.errorf("expected value but found %q instead", r) } @@ -618,6 +626,9 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValue) @@ -640,6 +651,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValueEnd) @@ -648,6 +662,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { lx.ignore() lx.skip(isWhitespace) if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } return lx.errorf("trailing comma not allowed in inline tables") } return lexInlineTableValue @@ -687,7 +704,12 @@ func lexString(lx *lexer) stateFn { return lexStringEscape case r == '"': lx.backup() - lx.emit(itemString) + if lx.esc { + lx.esc = false + lx.emit(itemStringEsc) + } else { + lx.emit(itemString) + } lx.next() lx.ignore() return lx.pop() @@ -737,6 +759,7 @@ func lexMultilineString(lx *lexer) stateFn { lx.backup() /// backup: don't include the """ in the item. lx.backup() lx.backup() + lx.esc = false lx.emit(itemMultilineString) lx.next() /// Read over ''' again and discard it. lx.next() @@ -770,8 +793,8 @@ func lexRawString(lx *lexer) stateFn { } } -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning ''' has already been consumed and +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and // ignored. 
func lexMultilineRawString(lx *lexer) stateFn { r := lx.next() @@ -826,8 +849,14 @@ func lexMultilineStringEscape(lx *lexer) stateFn { } func lexStringEscape(lx *lexer) stateFn { + lx.esc = true r := lx.next() switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough case 'b': fallthrough case 't': @@ -846,6 +875,11 @@ func lexStringEscape(lx *lexer) stateFn { fallthrough case '\\': return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape case 'u': return lexShortUnicodeEscape case 'U': @@ -854,14 +888,23 @@ func lexStringEscape(lx *lexer) stateFn { return lx.error(errLexEscape{r}) } +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + func lexShortUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 4; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected four hexadecimal digits after '\u', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current()) } } return lx.pop() @@ -871,10 +914,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 8; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected eight hexadecimal digits after '\U', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current()) } } return lx.pop() @@ -941,7 +982,7 @@ func lexDatetime(lx *lexer) stateFn { // lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. 
func lexHexInteger(lx *lexer) stateFn { r := lx.next() - if isHexadecimal(r) { + if isHex(r) { return lexHexInteger } switch r { @@ -1075,7 +1116,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { return lexOctalInteger case 'x': r = lx.peek() - if !isHexadecimal(r) { + if !isHex(r) { lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) } return lexHexInteger @@ -1173,7 +1214,7 @@ func (itype itemType) String() string { return "EOF" case itemText: return "Text" - case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString: return "String" case itemBool: return "Bool" @@ -1206,7 +1247,7 @@ func (itype itemType) String() string { } func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) + return fmt.Sprintf("(%s, %s)", item.typ, item.val) } func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } @@ -1222,10 +1263,23 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n func isDigit(r rune) bool { return r >= '0' && r <= '9' } func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') -} -func isBareKeyChar(r rune) bool { +func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } +func isBareKeyChar(r rune, tomlNext bool) bool { + if tomlNext { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' || + r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || + (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || + (r >= 0x037f && r <= 0x1fff) || + (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || + (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || + (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || + (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || + (r >= 0x10000 && r <= 0xeffff) + } + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index 71847a041..e61453730 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -13,7 +13,7 @@ type MetaData struct { context Key // Used only during decoding. keyInfo map[string]keyInfo - mapping map[string]interface{} + mapping map[string]any keys []Key decoded map[string]struct{} data []byte // Input file; for errors. @@ -31,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool { } var ( - hash map[string]interface{} + hash map[string]any ok bool - hashOrVal interface{} = md.mapping + hashOrVal any = md.mapping ) for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { + if hash, ok = hashOrVal.(map[string]any); !ok { return false } if hashOrVal, ok = hash[k]; !ok { @@ -94,28 +94,55 @@ func (md *MetaData) Undecoded() []Key { type Key []string func (k Key) String() string { - ss := make([]string, len(k)) - for i := range k { - ss[i] = k.maybeQuoted(i) + // This is called quite often, so it's a bit funky to make it faster. 
+ var b strings.Builder + b.Grow(len(k) * 25) +outer: + for i, kk := range k { + if i > 0 { + b.WriteByte('.') + } + if kk == "" { + b.WriteString(`""`) + } else { + for _, r := range kk { + // "Inline" isBareKeyChar + if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') { + b.WriteByte('"') + b.WriteString(dblQuotedReplacer.Replace(kk)) + b.WriteByte('"') + continue outer + } + } + b.WriteString(kk) + } } - return strings.Join(ss, ".") + return b.String() } func (k Key) maybeQuoted(i int) string { if k[i] == "" { return `""` } - for _, c := range k[i] { - if !isBareKeyChar(c) { - return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + for _, r := range k[i] { + if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' { + continue } + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` } return k[i] } +// Like append(), but only increase the cap by 1. func (k Key) add(piece string) Key { + if cap(k) > len(k) { + return append(k, piece) + } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece return newKey } + +func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece. +func (k Key) last() string { return k[len(k)-1] } // last piece of this key. diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index d2542d6f9..3f2c090c8 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -2,6 +2,8 @@ package toml import ( "fmt" + "math" + "os" "strconv" "strings" "time" @@ -15,12 +17,13 @@ type parser struct { context Key // Full key for the current hash in scope. currentKey string // Base key name for everything except hashes. pos Position // Current position in the TOML file. + tomlNext bool ordered []Key // List of keys in the order that they appear in the TOML data. - keyInfo map[string]keyInfo // Map keyname β†’ info about the TOML key. - mapping map[string]interface{} // Map keyname β†’ key value. - implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). + keyInfo map[string]keyInfo // Map keyname β†’ info about the TOML key. + mapping map[string]any // Map keyname β†’ key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). } type keyInfo struct { @@ -29,6 +32,8 @@ type keyInfo struct { } func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + defer func() { if r := recover(); r != nil { if pErr, ok := r.(ParseError); ok { @@ -41,9 +46,13 @@ func parse(data string) (p *parser, err error) { }() // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() - // which mangles stuff. - if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. 
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] + //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] } // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 @@ -56,7 +65,7 @@ func parse(data string) (p *parser, err error) { if i := strings.IndexRune(data[:ex], 0); i > -1 { return nil, ParseError{ Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", - Position: Position{Line: 1, Start: i, Len: 1}, + Position: Position{Line: 1, Col: 1, Start: i, Len: 1}, Line: 1, input: data, } @@ -64,10 +73,11 @@ func parse(data string) (p *parser, err error) { p = &parser{ keyInfo: make(map[string]keyInfo), - mapping: make(map[string]interface{}), - lx: lex(data), + mapping: make(map[string]any), + lx: lex(data, tomlNext), ordered: make([]Key, 0), implicits: make(map[string]struct{}), + tomlNext: tomlNext, } for { item := p.next() @@ -82,26 +92,27 @@ func parse(data string) (p *parser, err error) { func (p *parser) panicErr(it item, err error) { panic(ParseError{ + Message: err.Error(), err: err, - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) } -func (p *parser) panicItemf(it item, format string, v ...interface{}) { +func (p *parser) panicItemf(it item, format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) } -func (p *parser) panicf(format string, v ...interface{}) { +func (p *parser) panicf(format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: p.pos, + Position: p.pos.withCol(p.lx.input), Line: p.pos.Line, LastKey: p.current(), }) @@ -113,10 +124,11 @@ func (p *parser) next() item { if it.typ == itemError { if it.err != nil { panic(ParseError{ - Position: it.pos, + Message: it.err.Error(), + err: it.err, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Line, LastKey: p.current(), - err: it.err, }) } @@ -131,7 +143,7 @@ func (p *parser) nextPos() item { return it } -func (p *parser) bug(format string, v ...interface{}) { +func (p *parser) bug(format string, v ...any) { panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) } @@ -186,20 +198,21 @@ func (p *parser) topLevel(item item) { p.assertEqual(itemKeyEnd, k.typ) /// The current key is the last part. - p.currentKey = key[len(key)-1] + p.currentKey = key.last() /// All the other parts (if any) are the context; need to set each part /// as implicit. - context := key[:len(key)-1] + context := key.parent() for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Set value. vItem := p.next() val, typ := p.value(vItem, false) - p.set(p.currentKey, val, typ, vItem.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, vItem.pos) /// Remove the context we added (preserving any context from [tbl] lines). 
p.context = outerContext @@ -214,7 +227,7 @@ func (p *parser) keyString(it item) string { switch it.typ { case itemText: return it.val - case itemString, itemMultilineString, + case itemString, itemStringEsc, itemMultilineString, itemRawString, itemRawMultilineString: s, _ := p.value(it, false) return s.(string) @@ -231,12 +244,14 @@ var datetimeRepl = strings.NewReplacer( // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. -func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { +func (p *parser) value(it item, parentIsArray bool) (any, tomlType) { switch it.typ { case itemString: + return it.val, p.typeOfPrimitive(it) + case itemStringEsc: return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: @@ -266,7 +281,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { panic("unreachable") } -func (p *parser) valueInteger(it item) (interface{}, tomlType) { +func (p *parser) valueInteger(it item) (any, tomlType) { if !numUnderscoresOK(it.val) { p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) } @@ -290,7 +305,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) { return num, p.typeOfPrimitive(it) } -func (p *parser) valueFloat(it item) (interface{}, tomlType) { +func (p *parser) valueFloat(it item) (any, tomlType) { parts := strings.FieldsFunc(it.val, func(r rune) bool { switch r { case '.', 'e', 'E': @@ -314,7 +329,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) } val := strings.Replace(it.val, "_", "", -1) - if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. 
+ signbit := false + if val == "+nan" || val == "-nan" { + signbit = val == "-nan" val = "nan" } num, err := strconv.ParseFloat(val, 64) @@ -325,20 +342,29 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { p.panicItemf(it, "Invalid float value: %q", it.val) } } + if signbit { + num = math.Copysign(num, -1) + } return num, p.typeOfPrimitive(it) } var dtTypes = []struct { fmt string zone *time.Location + next bool }{ - {time.RFC3339Nano, time.Local}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, - {"2006-01-02", internal.LocalDate}, - {"15:04:05.999999999", internal.LocalTime}, + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, } -func (p *parser) valueDatetime(it item) (interface{}, tomlType) { +func (p *parser) valueDatetime(it item) (any, tomlType) { it.val = datetimeRepl.Replace(it.val) var ( t time.Time @@ -346,28 +372,49 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) { err error ) for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { + if missingLeadingZero(it.val, dt.fmt) { + p.panicErr(it, errParseDate{it.val}) + } ok = true break } } if !ok { - p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + p.panicErr(it, errParseDate{it.val}) } return t, p.typeOfPrimitive(it) } -func (p *parser) valueArray(it item) (interface{}, tomlType) { +// Go's time.Parse() will accept numbers without a leading zero; there isn't any +// way to require it. https://github.com/golang/go/issues/29911 +// +// Depend on the fact that the separators (- and :) should always be at the same +// location. +func missingLeadingZero(d, l string) bool { + for i, c := range []byte(l) { + if c == '.' || c == 'Z' { + return false + } + if (c < '0' || c > '9') && d[i] != c { + return true + } + } + return false +} + +func (p *parser) valueArray(it item) (any, tomlType) { p.setType(p.currentKey, tomlArray, it.pos) var ( - types []tomlType - - // Initialize to a non-nil empty slice. This makes it consistent with - // how S = [] decodes into a non-nil slice inside something like struct - // { S []string }. See #338 - array = []interface{}{} + // Initialize to a non-nil slice to make it consistent with how S = [] + // decodes into a non-nil slice inside something like struct { S + // []string }. See #338 + array = make([]any, 0, 2) ) for it = p.next(); it.typ != itemArrayEnd; it = p.next() { if it.typ == itemCommentStart { @@ -377,20 +424,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) { val, typ := p.value(it, true) array = append(array, val) - types = append(types, typ) - // XXX: types isn't used here, we need it to record the accurate type + // XXX: type isn't used here, we need it to record the accurate type // information. // // Not entirely sure how to best store this; could use "key[0]", // "key[1]" notation, or maybe store it on the Array type? 
+ _ = typ } return array, tomlArray } -func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { +func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) { var ( - hash = make(map[string]interface{}) + topHash = make(map[string]any) outerContext = p.context outerKey = p.currentKey ) @@ -418,19 +465,33 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom p.assertEqual(itemKeyEnd, k.typ) /// The current key is the last part. - p.currentKey = key[len(key)-1] + p.currentKey = key.last() /// All the other parts (if any) are the context; need to set each part /// as implicit. - context := key[:len(key)-1] + context := key.parent() for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Set the value. val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ, it.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, it.pos) + + hash := topHash + for _, c := range context { + h, ok := hash[c] + if !ok { + h = make(map[string]any) + hash[c] = h + } + hash, ok = h.(map[string]any) + if !ok { + p.panicf("%q is not a table", p.context) + } + } hash[p.currentKey] = val /// Restore context. @@ -438,7 +499,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom } p.context = outerContext p.currentKey = outerKey - return hash, tomlHash + return topHash, tomlHash } // numHasLeadingZero checks if this number has leading zeroes, allowing for '0', @@ -468,9 +529,9 @@ func numUnderscoresOK(s string) bool { } } - // isHexadecimal is a superset of all the permissable characters - // surrounding an underscore. - accept = isHexadecimal(r) + // isHexis a superset of all the permissable characters surrounding an + // underscore. + accept = isHex(r) } return accept } @@ -493,21 +554,19 @@ func numPeriodsOK(s string) bool { // Establishing the context also makes sure that the key isn't a duplicate, and // will create implicit hashes automatically. func (p *parser) addContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. + /// Always start at the top level and drill down for our context. hashContext := p.mapping - keyContext := make(Key, 0) + keyContext := make(Key, 0, len(key)-1) - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] + /// We only need implicit hashes for the parents. + for _, k := range key.parent() { + _, ok := hashContext[k] keyContext = append(keyContext, k) // No key? Make an implicit hash and move on. if !ok { p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) + hashContext[k] = make(map[string]any) } // If the hash context is actually an array of tables, then set @@ -516,9 +575,9 @@ func (p *parser) addContext(key Key, array bool) { // Otherwise, it better be a table, since this MUST be a key group (by // virtue of it not being the last element in a key). 
switch t := hashContext[k].(type) { - case []map[string]interface{}: + case []map[string]any: hashContext = t[len(t)-1] - case map[string]interface{}: + case map[string]any: hashContext = t default: p.panicf("Key '%s' was already created as a hash.", keyContext) @@ -529,40 +588,33 @@ func (p *parser) addContext(key Key, array bool) { if array { // If this is the first element for this array, then allocate a new // list of tables for it. - k := key[len(key)-1] + k := key.last() if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 4) + hashContext[k] = make([]map[string]any, 0, 4) } // Add a new table. But make sure the key hasn't already been used // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) + if hash, ok := hashContext[k].([]map[string]any); ok { + hashContext[k] = append(hash, make(map[string]any)) } else { p.panicf("Key '%s' was already created and cannot be used as an array.", key) } } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) + p.setValue(key.last(), make(map[string]any)) } - p.context = append(p.context, key[len(key)-1]) -} - -// set calls setValue and setType. -func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { - p.setValue(key, val) - p.setType(key, typ, pos) - + p.context = append(p.context, key.last()) } // setValue sets the given key to the given value in the current context. // It will make sure that the key hasn't already been defined, account for // implicit key groups. -func (p *parser) setValue(key string, value interface{}) { +func (p *parser) setValue(key string, value any) { var ( - tmpHash interface{} + tmpHash any ok bool hash = p.mapping - keyContext Key + keyContext = make(Key, 0, len(p.context)+1) ) for _, k := range p.context { keyContext = append(keyContext, k) @@ -570,11 +622,11 @@ func (p *parser) setValue(key string, value interface{}) { p.bug("Context for key '%s' has not been established.", keyContext) } switch t := tmpHash.(type) { - case []map[string]interface{}: + case []map[string]any: // The context is a table of hashes. Pick the most recent table // defined as the current hash. hash = t[len(t)-1] - case map[string]interface{}: + case map[string]any: hash = t default: p.panicf("Key '%s' has already been defined.", keyContext) @@ -601,9 +653,8 @@ func (p *parser) setValue(key string, value interface{}) { p.removeImplicit(keyContext) return } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. + // Otherwise, we have a concrete key trying to override a previous key, + // which is *always* wrong. p.panicf("Key '%s' has already been defined.", keyContext) } @@ -632,14 +683,11 @@ func (p *parser) setType(key string, typ tomlType, pos Position) { // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and // "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). 
-func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } -func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } -func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } -func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } -func (p *parser) addImplicitContext(key Key) { - p.addImplicit(key) - p.addContext(key, false) -} +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } // current returns the full key name of the current context. func (p *parser) current() string { @@ -662,114 +710,131 @@ func stripFirstNewline(s string) string { return s } -// Remove newlines inside triple-quoted strings if a line ends with "\". +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. func (p *parser) stripEscapedNewlines(s string) string { - split := strings.Split(s, "\n") - if len(split) < 1 { - return s - } - - escNL := false // Keep track of the last non-blank line was escaped. - for i, line := range split { - line = strings.TrimRight(line, " \t\r") - - if len(line) == 0 || line[len(line)-1] != '\\' { - split[i] = strings.TrimRight(split[i], "\r") - if !escNL && i != len(split)-1 { - split[i] += "\n" - } - continue + var ( + b strings.Builder + i int + ) + b.Grow(len(s)) + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() } + i += ix - escBS := true - for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- { - escBS = !escBS + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue } - if escNL { - line = strings.TrimLeft(line, " \t\r") + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } } - escNL = !escBS - - if escBS { - split[i] += "\n" + if j == i+1 { + // Not a whitespace escape. + i++ continue } - - if i == len(split)-1 { - p.panicf("invalid escape: '\\ '") - } - - split[i] = line[:len(line)-1] // Remove \ - if len(split)-1 > i { - split[i+1] = strings.TrimLeft(split[i+1], " \t\r") + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. (It's a bad escape sequence, + // but we can let replaceEscapes catch it.) 
+ i++ + continue } + b.WriteString(s[:i]) + s = s[j:] + i = 0 } - return strings.Join(split, "") } func (p *parser) replaceEscapes(it item, str string) string { - replaced := make([]rune, 0, len(str)) - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) + var ( + b strings.Builder + skip = 0 + ) + b.Grow(len(str)) + for i, c := range str { + if skip > 0 { + skip-- + continue + } + if c != '\\' { + b.WriteRune(c) continue } - r += 1 - if r >= len(s) { + + if i >= len(str) { p.bug("Escape sequence at end of string.") return "" } - switch s[r] { + switch str[i+1] { default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) + p.bug("Expected valid escape code after \\, but got %q.", str[i+1]) case ' ', '\t': - p.panicItemf(it, "invalid escape: '\\%c'", s[r]) + p.panicItemf(it, "invalid escape: '\\%c'", str[i+1]) case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 + b.WriteByte(0x08) + skip = 1 case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 + b.WriteByte(0x09) + skip = 1 case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 + b.WriteByte(0x0a) + skip = 1 case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 + b.WriteByte(0x0c) + skip = 1 case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 + b.WriteByte(0x0d) + skip = 1 + case 'e': + if p.tomlNext { + b.WriteByte(0x1b) + skip = 1 + } case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 + b.WriteByte(0x22) + skip = 1 case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 + b.WriteByte(0x5c) + skip = 1 + // The lexer guarantees the correct number of characters are present; + // don't need to check here. + case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 + } case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) - replaced = append(replaced, escaped) - r += 5 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) + b.WriteRune(escaped) + skip = 5 case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) - replaced = append(replaced, escaped) - r += 9 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10]) + b.WriteRune(escaped) + skip = 9 } } - return string(replaced) + return b.String() } -func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { - s := string(bs) +func (p *parser) asciiEscapeToUnicode(it item, s string) rune { hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) if err != nil { p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go index 254ca82e5..10c51f7ee 100644 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -25,10 +25,8 @@ type field struct { // breaking ties with index sequence. 
type byName []field -func (x byName) Len() int { return len(x) } - +func (x byName) Len() int { return len(x) } func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - func (x byName) Less(i, j int) bool { if x[i].name != x[j].name { return x[i].name < x[j].name @@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool { // byIndex sorts field by index sequence. type byIndex []field -func (x byIndex) Len() int { return len(x) } - +func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go index 4e90d7737..1c090d331 100644 --- a/vendor/github.com/BurntSushi/toml/type_toml.go +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool { type tomlBaseType string -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} +func (btype tomlBaseType) typeString() string { return string(btype) } +func (btype tomlBaseType) String() string { return btype.typeString() } var ( tomlInteger tomlBaseType = "Integer" @@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType { return tomlFloat case itemDatetime: return tomlDatetime - case itemString: + case itemString, itemStringEsc: return tomlString case itemMultilineString: return tomlString diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml index 68d38816f..a51a14466 100644 --- a/vendor/github.com/agext/levenshtein/.travis.yml +++ b/vendor/github.com/agext/levenshtein/.travis.yml @@ -3,11 +3,8 @@ sudo: false matrix: fast_finish: true include: - - go: 1.14.x - env: TEST_METHOD=goveralls - - go: 1.13.x - - go: 1.12.x - go: 1.11.x + env: TEST_METHOD=goveralls - go: 1.10.x - go: tip - go: 1.9.x @@ -17,8 +14,6 @@ matrix: - go: 1.5.x allow_failures: - go: tip - - go: 1.11.x - - go: 1.10.x - go: 1.9.x - go: 1.8.x - go: 1.7.x diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md index d9a8ce16d..9e4255879 100644 --- a/vendor/github.com/agext/levenshtein/README.md +++ b/vendor/github.com/agext/levenshtein/README.md @@ -11,7 +11,7 @@ This package implements distance and similarity metrics for strings, based on th ## Project Status -v1.2.3 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. +v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. 
diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go index 56d719b83..df69ce701 100644 --- a/vendor/github.com/agext/levenshtein/levenshtein.go +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -108,7 +108,7 @@ func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, for x := 0; x < l2; x++ { dy, d[doff] = d[doff], d[doff]+insCost - for doff < l1 && d[doff] > maxCost && dlen > 0 { + for d[doff] > maxCost && dlen > 0 { if str1[doff] != str2[x] { dy += subCost } diff --git a/vendor/github.com/dnaeon/go-vcr/LICENSE b/vendor/github.com/dnaeon/go-vcr/LICENSE new file mode 100644 index 000000000..9a4613208 --- /dev/null +++ b/vendor/github.com/dnaeon/go-vcr/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2015-2016 Marin Atanasov Nikolov +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer + in this position and unchanged. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dnaeon/go-vcr/cassette/cassette.go b/vendor/github.com/dnaeon/go-vcr/cassette/cassette.go new file mode 100644 index 000000000..e610dee8d --- /dev/null +++ b/vendor/github.com/dnaeon/go-vcr/cassette/cassette.go @@ -0,0 +1,241 @@ +// Copyright (c) 2015 Marin Atanasov Nikolov +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer +// in this position and unchanged. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+// IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package cassette + +import ( + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "sync" + + "gopkg.in/yaml.v2" +) + +// Cassette format versions +const ( + cassetteFormatV1 = 1 +) + +var ( + // ErrInteractionNotFound indicates that a requested + // interaction was not found in the cassette file + ErrInteractionNotFound = errors.New("Requested interaction not found") +) + +// Request represents a client request as recorded in the +// cassette file +type Request struct { + // Body of request + Body string `yaml:"body"` + + // Form values + Form url.Values `yaml:"form"` + + // Request headers + Headers http.Header `yaml:"headers"` + + // Request URL + URL string `yaml:"url"` + + // Request method + Method string `yaml:"method"` +} + +// Response represents a server response as recorded in the +// cassette file +type Response struct { + // Body of response + Body string `yaml:"body"` + + // Response headers + Headers http.Header `yaml:"headers"` + + // Response status message + Status string `yaml:"status"` + + // Response status code + Code int `yaml:"code"` + + // Response duration (something like "100ms" or "10s") + Duration string `yaml:"duration"` + + replayed bool +} + +// Interaction type contains a pair of request/response for a +// single HTTP interaction between a client and a server +type Interaction struct { + Request `yaml:"request"` + Response `yaml:"response"` +} + +// Matcher function returns true when the actual request matches +// a single HTTP interaction's request according to the function's +// own criteria. +type Matcher func(*http.Request, Request) bool + +// DefaultMatcher is used when a custom matcher is not defined +// and compares only the method and URL. +func DefaultMatcher(r *http.Request, i Request) bool { + return r.Method == i.Method && r.URL.String() == i.URL +} + +// Filter function allows modification of an interaction before saving. +type Filter func(*Interaction) error + +// Cassette type +type Cassette struct { + // Name of the cassette + Name string `yaml:"-"` + + // File name of the cassette as written on disk + File string `yaml:"-"` + + // Cassette format version + Version int `yaml:"version"` + + // Mutex to lock accessing Interactions. omitempty is set + // to prevent the mutex appearing in the recorded YAML. + Mu sync.RWMutex `yaml:"mu,omitempty"` + // Interactions between client and server + Interactions []*Interaction `yaml:"interactions"` + // ReplayableInteractions defines whether to allow interactions to be replayed or not + ReplayableInteractions bool `yaml:"-"` + + // Matches actual request with interaction requests. + Matcher Matcher `yaml:"-"` + + // Filters interactions before when they are captured. + Filters []Filter `yaml:"-"` + + // SaveFilters are applied to interactions just before they are saved. 
+ SaveFilters []Filter `yaml:"-"` +} + +// New creates a new empty cassette +func New(name string) *Cassette { + c := &Cassette{ + Name: name, + File: fmt.Sprintf("%s.yaml", name), + Version: cassetteFormatV1, + Interactions: make([]*Interaction, 0), + Matcher: DefaultMatcher, + Filters: make([]Filter, 0), + SaveFilters: make([]Filter, 0), + } + + return c +} + +// Load reads a cassette file from disk +func Load(name string) (*Cassette, error) { + c := New(name) + data, err := ioutil.ReadFile(c.File) + if err != nil { + return nil, err + } + + err = yaml.Unmarshal(data, &c) + + return c, err +} + +// AddInteraction appends a new interaction to the cassette +func (c *Cassette) AddInteraction(i *Interaction) { + c.Mu.Lock() + c.Interactions = append(c.Interactions, i) + c.Mu.Unlock() +} + +// GetInteraction retrieves a recorded request/response interaction +func (c *Cassette) GetInteraction(r *http.Request) (*Interaction, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + for _, i := range c.Interactions { + if (c.ReplayableInteractions || !i.replayed) && c.Matcher(r, i.Request) { + i.replayed = true + return i, nil + } + } + + return nil, ErrInteractionNotFound +} + +// Save writes the cassette data on disk for future re-use +func (c *Cassette) Save() error { + c.Mu.RLock() + defer c.Mu.RUnlock() + // Save cassette file only if there were any interactions made + if len(c.Interactions) == 0 { + return nil + } + + for _, interaction := range c.Interactions { + for _, filter := range c.SaveFilters { + if err := filter(interaction); err != nil { + return err + } + } + } + + // Create directory for cassette if missing + cassetteDir := filepath.Dir(c.File) + if _, err := os.Stat(cassetteDir); os.IsNotExist(err) { + if err = os.MkdirAll(cassetteDir, 0755); err != nil { + return err + } + } + + // Marshal to YAML and save interactions + data, err := yaml.Marshal(c) + if err != nil { + return err + } + + f, err := os.Create(c.File) + if err != nil { + return err + } + + defer f.Close() + + // Honor the YAML structure specification + // http://www.yaml.org/spec/1.2/spec.html#id2760395 + _, err = f.Write([]byte("---\n")) + if err != nil { + return err + } + + _, err = f.Write(data) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/dnaeon/go-vcr/recorder/go17_nobody.go b/vendor/github.com/dnaeon/go-vcr/recorder/go17_nobody.go new file mode 100644 index 000000000..465961abb --- /dev/null +++ b/vendor/github.com/dnaeon/go-vcr/recorder/go17_nobody.go @@ -0,0 +1,37 @@ +// Copyright (c) 2015-2016 Marin Atanasov Nikolov +// Copyright (c) 2016 David Jack +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer +// in this position and unchanged. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+// IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// +build !go1.8 + +package recorder + +import ( + "io" +) + +// isNoBody returns true iff r is an http.NoBody. +// http.NoBody didn't exist before Go 1.7, so the version in this file +// always returns false. +func isNoBody(r io.ReadCloser) bool { return false } diff --git a/vendor/github.com/dnaeon/go-vcr/recorder/go18_nobody.go b/vendor/github.com/dnaeon/go-vcr/recorder/go18_nobody.go new file mode 100644 index 000000000..062bbe3a4 --- /dev/null +++ b/vendor/github.com/dnaeon/go-vcr/recorder/go18_nobody.go @@ -0,0 +1,36 @@ +// Copyright (c) 2015-2016 Marin Atanasov Nikolov +// Copyright (c) 2016 David Jack +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer +// in this position and unchanged. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// +build go1.8 + +package recorder + +import ( + "io" + "net/http" +) + +// isNoBody returns true iff r is an http.NoBody. +func isNoBody(r io.ReadCloser) bool { return r == http.NoBody } diff --git a/vendor/github.com/dnaeon/go-vcr/recorder/recorder.go b/vendor/github.com/dnaeon/go-vcr/recorder/recorder.go new file mode 100644 index 000000000..8e0255378 --- /dev/null +++ b/vendor/github.com/dnaeon/go-vcr/recorder/recorder.go @@ -0,0 +1,322 @@ +// Copyright (c) 2015-2016 Marin Atanasov Nikolov +// Copyright (c) 2016 David Jack +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer +// in this position and unchanged. +// 2. 
Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package recorder + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "os" + "strconv" + "time" + + "github.com/dnaeon/go-vcr/cassette" +) + +// Mode represents recording/playback mode +type Mode int + +// Recorder states +const ( + ModeRecording Mode = iota + ModeReplaying + ModeDisabled + // Replay record from cassette or record a new one when a request is not + // present in cassette instead of throwing ErrInteractionNotFound + ModeReplayingOrRecording +) + +// Recorder represents a type used to record and replay +// client and server interactions +type Recorder struct { + // Operating mode of the recorder + mode Mode + + // Cassette used by the recorder + cassette *cassette.Cassette + + // realTransport is the underlying http.RoundTripper to make real requests + realTransport http.RoundTripper + + // Pass through requests. + Passthroughs []Passthrough +} + +// Passthrough function allows ignoring certain requests. 
+type Passthrough func(*http.Request) bool + +// SetTransport can be used to configure the behavior of the 'real' client used in record-mode +func (r *Recorder) SetTransport(t http.RoundTripper) { + r.realTransport = t +} + +// Proxies client requests to their original destination +func requestHandler(r *http.Request, c *cassette.Cassette, mode Mode, realTransport http.RoundTripper) (*cassette.Interaction, error) { + // Return interaction from cassette if in replay mode or replay/record mode + if mode == ModeReplaying || mode == ModeReplayingOrRecording { + if err := r.Context().Err(); err != nil { + return nil, err + } + + if interaction, err := c.GetInteraction(r); mode == ModeReplaying { + return interaction, err + } else if mode == ModeReplayingOrRecording && err == nil { + return interaction, err + } + } + + // Copy the original request, so we can read the form values + reqBytes, err := httputil.DumpRequestOut(r, true) + if err != nil { + return nil, err + } + + reqBuffer := bytes.NewBuffer(reqBytes) + copiedReq, err := http.ReadRequest(bufio.NewReader(reqBuffer)) + if err != nil { + return nil, err + } + + err = copiedReq.ParseForm() + if err != nil { + return nil, err + } + + reqBody := &bytes.Buffer{} + if r.Body != nil && !isNoBody(r.Body) { + // Record the request body so we can add it to the cassette + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, reqBody)) + } + + // Perform client request to it's original + // destination and record interactions + resp, err := realTransport.RoundTrip(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + // Add interaction to cassette + interaction := &cassette.Interaction{ + Request: cassette.Request{ + Body: reqBody.String(), + Form: copiedReq.PostForm, + Headers: r.Header, + URL: r.URL.String(), + Method: r.Method, + }, + Response: cassette.Response{ + Body: string(respBody), + Headers: resp.Header, + Status: resp.Status, + Code: resp.StatusCode, + }, + } + for _, filter := range c.Filters { + err = filter(interaction) + if err != nil { + return nil, err + } + } + c.AddInteraction(interaction) + + return interaction, nil +} + +// New creates a new recorder +func New(cassetteName string) (*Recorder, error) { + // Default mode is "replay" if file exists + return NewAsMode(cassetteName, ModeReplaying, nil) +} + +// NewAsMode creates a new recorder in the specified mode +func NewAsMode(cassetteName string, mode Mode, realTransport http.RoundTripper) (*Recorder, error) { + var c *cassette.Cassette + cassetteFile := fmt.Sprintf("%s.yaml", cassetteName) + + if mode != ModeDisabled { + // Depending on whether the cassette file exists or not we + // either create a new empty cassette or load from file + if _, err := os.Stat(cassetteFile); os.IsNotExist(err) || mode == ModeRecording { + // Create new cassette and enter in recording mode + c = cassette.New(cassetteName) + mode = ModeRecording + } else { + // Load cassette from file and enter replay mode or replay/record mode + c, err = cassette.Load(cassetteName) + if err != nil { + return nil, err + } + } + } + + if realTransport == nil { + realTransport = http.DefaultTransport + } + + r := &Recorder{ + mode: mode, + cassette: c, + realTransport: realTransport, + } + + return r, nil +} + +// Stop is used to stop the recorder and save any recorded interactions +func (r *Recorder) Stop() error { + if r.mode == ModeRecording || r.mode == ModeReplayingOrRecording { + if err := r.cassette.Save(); err 
!= nil { + return err + } + } + + return nil +} + +// RoundTrip implements the http.RoundTripper interface +func (r *Recorder) RoundTrip(req *http.Request) (*http.Response, error) { + if r.mode == ModeDisabled { + return r.realTransport.RoundTrip(req) + } + for _, passthrough := range r.Passthroughs { + if passthrough(req) { + return r.realTransport.RoundTrip(req) + } + } + + // Pass cassette and mode to handler, so that interactions can be + // retrieved or recorded depending on the current recorder mode + interaction, err := requestHandler(req, r.cassette, r.mode, r.realTransport) + + if err != nil { + return nil, err + } + + select { + case <-req.Context().Done(): + return nil, req.Context().Err() + default: + buf := bytes.NewBuffer([]byte(interaction.Response.Body)) + // apply the duration defined in the interaction + if interaction.Response.Duration != "" { + d, err := time.ParseDuration(interaction.Duration) + if err != nil { + return nil, err + } + // block for the configured 'duration' to simulate the network latency and server processing time. + <-time.After(d) + } + + contentLength := int64(buf.Len()) + // For HTTP HEAD requests, the ContentLength should be set to the size + // of the body that would have been sent for a GET. + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + if req.Method == "HEAD" { + if hdr := interaction.Response.Headers.Get("Content-Length"); hdr != "" { + cl, err := strconv.ParseInt(hdr, 10, 64) + if err == nil { + contentLength = cl + } + } + } + return &http.Response{ + Status: interaction.Response.Status, + StatusCode: interaction.Response.Code, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: req, + Header: interaction.Response.Headers, + Close: true, + ContentLength: contentLength, + Body: ioutil.NopCloser(buf), + }, nil + } +} + +// CancelRequest implements the github.com/coreos/etcd/client.CancelableTransport interface +func (r *Recorder) CancelRequest(req *http.Request) { + type cancelableTransport interface { + CancelRequest(req *http.Request) + } + if ct, ok := r.realTransport.(cancelableTransport); ok { + ct.CancelRequest(req) + } +} + +// SetMatcher sets a function to match requests against recorded HTTP interactions. +func (r *Recorder) SetMatcher(matcher cassette.Matcher) { + if r.cassette != nil { + r.cassette.Matcher = matcher + } +} + +// SetReplayableInteractions defines whether to allow interactions to be replayed or not. +func (r *Recorder) SetReplayableInteractions(replayable bool) { + if r.cassette != nil { + r.cassette.ReplayableInteractions = replayable + } +} + +// AddPassthrough appends a hook to determine if a request should be ignored by the +// recorder. +func (r *Recorder) AddPassthrough(pass Passthrough) { + r.Passthroughs = append(r.Passthroughs, pass) +} + +// AddFilter appends a hook to modify a request before it is recorded. +// +// Filters are useful for filtering out sensitive parameters from the recorded data. +func (r *Recorder) AddFilter(filter cassette.Filter) { + if r.cassette != nil { + r.cassette.Filters = append(r.cassette.Filters, filter) + } +} + +// AddSaveFilter appends a hook to modify a request before it is saved. +// +// This filter is suitable for treating recorded responses to remove sensitive data. Altering responses using a regular +// AddFilter can have unintended consequences on code that is consuming responses. 
+func (r *Recorder) AddSaveFilter(filter cassette.Filter) { + if r.cassette != nil { + r.cassette.SaveFilters = append(r.cassette.SaveFilters, filter) + } +} + +// Mode returns recorder state +func (r *Recorder) Mode() Mode { + return r.mode +} diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/account_events.go b/vendor/github.com/fastly/go-fastly/v9/fastly/account_events.go index 4de5705c2..ce042e38b 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/account_events.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/account_events.go @@ -185,15 +185,13 @@ func (i *GetAPIEventsFilterInput) formatEventFilters() map[string]string { // of any of these filters. It doesn't appear we would need to at present. for key, value := range pairings { - switch t := reflect.TypeOf(value).String(); t { - case "string": - if value != "" { - v, _ := value.(string) // type assert to avoid runtime panic (v will have zero value for its type) + switch v := value.(type) { + case string: + if v != "" { result[key] = v } - case "int": - if value != 0 { - v, _ := value.(int) // type assert to avoid runtime panic (v will have zero value for its type) + case int: + if v != 0 { result[key] = strconv.Itoa(v) } } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/acl.go b/vendor/github.com/fastly/go-fastly/v9/fastly/acl.go index f14723e28..a75eb2501 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/acl.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/acl.go @@ -43,7 +43,7 @@ func (c *Client) ListACLs(i *ListACLsInput) ([]*ACL, error) { defer resp.Body.Close() var as []*ACL - if err := decodeBodyMap(resp.Body, &as); err != nil { + if err := DecodeBodyMap(resp.Body, &as); err != nil { return nil, err } return as, nil @@ -77,7 +77,7 @@ func (c *Client) CreateACL(i *CreateACLInput) (*ACL, error) { defer resp.Body.Close() var a *ACL - if err := decodeBodyMap(resp.Body, &a); err != nil { + if err := DecodeBodyMap(resp.Body, &a); err != nil { return nil, err } return a, nil @@ -114,7 +114,7 @@ func (c *Client) DeleteACL(i *DeleteACLInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { @@ -154,7 +154,7 @@ func (c *Client) GetACL(i *GetACLInput) (*ACL, error) { defer resp.Body.Close() var a *ACL - if err := decodeBodyMap(resp.Body, &a); err != nil { + if err := DecodeBodyMap(resp.Body, &a); err != nil { return nil, err } return a, nil @@ -193,7 +193,7 @@ func (c *Client) UpdateACL(i *UpdateACLInput) (*ACL, error) { defer resp.Body.Close() var a *ACL - if err := decodeBodyMap(resp.Body, &a); err != nil { + if err := DecodeBodyMap(resp.Body, &a); err != nil { return nil, err } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/acl_entry.go b/vendor/github.com/fastly/go-fastly/v9/fastly/acl_entry.go index 8dd1f2265..59e20123d 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/acl_entry.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/acl_entry.go @@ -122,7 +122,7 @@ func (c *Client) GetACLEntry(i *GetACLEntryInput) (*ACLEntry, error) { defer resp.Body.Close() var e *ACLEntry - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } @@ -163,7 +163,7 @@ func (c *Client) CreateACLEntry(i *CreateACLEntryInput) (*ACLEntry, error) { defer resp.Body.Close() var e *ACLEntry - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil 
{ return nil, err } @@ -201,7 +201,7 @@ func (c *Client) DeleteACLEntry(i *DeleteACLEntryInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } @@ -251,7 +251,7 @@ func (c *Client) UpdateACLEntry(i *UpdateACLEntryInput) (*ACLEntry, error) { defer resp.Body.Close() var e *ACLEntry - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } @@ -307,5 +307,5 @@ func (c *Client) BatchModifyACLEntries(i *BatchModifyACLEntriesInput) error { var batchModifyResult map[string]string - return decodeBodyMap(resp.Body, &batchModifyResult) + return DecodeBodyMap(resp.Body, &batchModifyResult) } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/automation_token.go b/vendor/github.com/fastly/go-fastly/v9/fastly/automation_token.go index 9d7463207..a39d6f5b7 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/automation_token.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/automation_token.go @@ -111,7 +111,7 @@ func (c *Client) GetAutomationToken(i *GetAutomationTokenInput) (*AutomationToke defer resp.Body.Close() var t *AutomationToken - if err := decodeBodyMap(resp.Body, &t); err != nil { + if err := DecodeBodyMap(resp.Body, &t); err != nil { return nil, err } return t, nil @@ -154,7 +154,7 @@ func (c *Client) CreateAutomationToken(i *CreateAutomationTokenInput) (*Automati defer resp.Body.Close() var t *AutomationToken - if err := decodeBodyMap(resp.Body, &t); err != nil { + if err := DecodeBodyMap(resp.Body, &t); err != nil { return nil, err } return t, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/backend.go b/vendor/github.com/fastly/go-fastly/v9/fastly/backend.go index 4f041d159..265ce468f 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/backend.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/backend.go @@ -73,7 +73,7 @@ func (c *Client) ListBackends(i *ListBackendsInput) ([]*Backend, error) { defer resp.Body.Close() var bs []*Backend - if err := decodeBodyMap(resp.Body, &bs); err != nil { + if err := DecodeBodyMap(resp.Body, &bs); err != nil { return nil, err } return bs, nil @@ -167,7 +167,7 @@ func (c *Client) CreateBackend(i *CreateBackendInput) (*Backend, error) { defer resp.Body.Close() var b *Backend - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -204,7 +204,7 @@ func (c *Client) GetBackend(i *GetBackendInput) (*Backend, error) { defer resp.Body.Close() var b *Backend - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -303,7 +303,7 @@ func (c *Client) UpdateBackend(i *UpdateBackendInput) (*Backend, error) { defer resp.Body.Close() var b *Backend - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -340,7 +340,7 @@ func (c *Client) DeleteBackend(i *DeleteBackendInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/billing.go b/vendor/github.com/fastly/go-fastly/v9/fastly/billing.go index 2e8923262..b3c73f3c2 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/billing.go +++ 
b/vendor/github.com/fastly/go-fastly/v9/fastly/billing.go @@ -78,7 +78,7 @@ func (c *Client) GetBilling(i *GetBillingInput) (*Billing, error) { defer resp.Body.Close() var b *Billing - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/client.go b/vendor/github.com/fastly/go-fastly/v9/fastly/client.go index dc05342be..ab54e611e 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/client.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/client.go @@ -62,7 +62,7 @@ const UserAgentEnvVar = "FASTLY_USER_AGENT" var ProjectURL = "github.com/fastly/go-fastly" // ProjectVersion is the version of this library. -var ProjectVersion = "9.11.0" +var ProjectVersion = "9.13.0" // UserAgent is the user agent for this particular client. var UserAgent = fmt.Sprintf("FastlyGo/%s (+%s; %s)", @@ -210,6 +210,20 @@ func (c *Client) Get(p string, ro *RequestOptions) (*http.Response, error) { return c.Request("GET", p, ro) } +// GetJSON issues an HTTP GET request and indicates that the response +// should be JSON encoded. +func (c *Client) GetJSON(p string, ro *RequestOptions) (*http.Response, error) { + if ro == nil { + ro = new(RequestOptions) + } + if ro.Headers == nil { + ro.Headers = make(map[string]string) + } + ro.Parallel = true + ro.Headers["Accept"] = JSONMimeType + return c.Request("GET", p, ro) +} + // Head issues an HTTP HEAD request. func (c *Client) Head(p string, ro *RequestOptions) (*http.Response, error) { if ro == nil { @@ -651,8 +665,8 @@ func checkResp(resp *http.Response, err error) (*http.Response, error) { } } -// decodeBodyMap is used to decode an HTTP response body into a mapstructure struct. -func decodeBodyMap(body io.Reader, out any) error { +// DecodeBodyMap is used to decode an HTTP response body into a mapstructure struct. +func DecodeBodyMap(body io.Reader, out any) error { var parsed any dec := json.NewDecoder(body) if err := dec.Decode(&parsed); err != nil { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/compute_package.go b/vendor/github.com/fastly/go-fastly/v9/fastly/compute_package.go index 1d543c5f6..2423ce3b2 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/compute_package.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/compute_package.go @@ -113,7 +113,7 @@ func MakePackagePath(serviceID string, serviceVersion int) (string, error) { // PopulatePackage encapsulates the decoding of returned package data. 
func PopulatePackage(body io.ReadCloser) (*Package, error) { var p *Package - if err := decodeBodyMap(body, &p); err != nil { + if err := DecodeBodyMap(body, &p); err != nil { return nil, err } return p, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/config_store.go b/vendor/github.com/fastly/go-fastly/v9/fastly/config_store.go index 32d740ec2..e570de2c6 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/config_store.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/config_store.go @@ -216,7 +216,7 @@ func (c *Client) ListConfigStoreServices(i *ListConfigStoreServicesInput) ([]*Se defer resp.Body.Close() var ss []*Service - if err = decodeBodyMap(resp.Body, &ss); err != nil { + if err = DecodeBodyMap(resp.Body, &ss); err != nil { return nil, err } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/config_store_item.go b/vendor/github.com/fastly/go-fastly/v9/fastly/config_store_item.go index 9f870a518..f54829e61 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/config_store_item.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/config_store_item.go @@ -251,5 +251,5 @@ func (c *Client) BatchModifyConfigStoreItems(i *BatchModifyConfigStoreItemsInput defer resp.Body.Close() var batchModifyResult map[string]string - return decodeBodyMap(resp.Body, &batchModifyResult) + return DecodeBodyMap(resp.Body, &batchModifyResult) } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/datacenters.go b/vendor/github.com/fastly/go-fastly/v9/fastly/datacenters.go index ac62eeee2..a6ced2d72 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/datacenters.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/datacenters.go @@ -26,7 +26,7 @@ func (c *Client) AllDatacenters() (datacenters []Datacenter, err error) { defer resp.Body.Close() var m []Datacenter - if err := decodeBodyMap(resp.Body, &m); err != nil { + if err := DecodeBodyMap(resp.Body, &m); err != nil { return nil, err } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary.go b/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary.go index ffba312a6..803cfce08 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary.go @@ -43,7 +43,7 @@ func (c *Client) ListDictionaries(i *ListDictionariesInput) ([]*Dictionary, erro defer resp.Body.Close() var bs []*Dictionary - if err := decodeBodyMap(resp.Body, &bs); err != nil { + if err := DecodeBodyMap(resp.Body, &bs); err != nil { return nil, err } return bs, nil @@ -79,7 +79,7 @@ func (c *Client) CreateDictionary(i *CreateDictionaryInput) (*Dictionary, error) defer resp.Body.Close() var b *Dictionary - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -116,7 +116,7 @@ func (c *Client) GetDictionary(i *GetDictionaryInput) (*Dictionary, error) { defer resp.Body.Close() var b *Dictionary - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -157,7 +157,7 @@ func (c *Client) UpdateDictionary(i *UpdateDictionaryInput) (*Dictionary, error) defer resp.Body.Close() var b *Dictionary - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary_info.go b/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary_info.go index deecd8929..1d7f200a1 
100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary_info.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary_info.go @@ -46,7 +46,7 @@ func (c *Client) GetDictionaryInfo(i *GetDictionaryInfoInput) (*DictionaryInfo, defer resp.Body.Close() var b *DictionaryInfo - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary_item.go b/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary_item.go index a3ad348ac..a02806662 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary_item.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/dictionary_item.go @@ -121,7 +121,7 @@ func (c *Client) CreateDictionaryItem(i *CreateDictionaryItemInput) (*Dictionary defer resp.Body.Close() var b *DictionaryItem - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -175,7 +175,7 @@ func (c *Client) GetDictionaryItem(i *GetDictionaryItemInput) (*DictionaryItem, defer resp.Body.Close() var b *DictionaryItem - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -214,7 +214,7 @@ func (c *Client) UpdateDictionaryItem(i *UpdateDictionaryItemInput) (*Dictionary defer resp.Body.Close() var b *DictionaryItem - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -262,7 +262,7 @@ func (c *Client) BatchModifyDictionaryItems(i *BatchModifyDictionaryItemsInput) defer resp.Body.Close() var batchModifyResult map[string]string - return decodeBodyMap(resp.Body, &batchModifyResult) + return DecodeBodyMap(resp.Body, &batchModifyResult) } // DeleteDictionaryItemInput is the input parameter to DeleteDictionaryItem. 
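The vendored go-fastly changes above rename the unexported decodeBodyMap helper to the exported DecodeBodyMap and, in client.go, add a GetJSON convenience method that sets the Accept header before issuing a GET. As a minimal sketch of what the export enables, not part of this changeset, the snippet below shows how code outside the fastly package could fetch and decode a JSON response with the now-public helper; the /service path and the serviceSummary type are illustrative assumptions only.

package example

import (
	"github.com/fastly/go-fastly/v9/fastly"
)

// serviceSummary is a hypothetical response shape used only for this illustration.
type serviceSummary struct {
	ID   string `mapstructure:"id"`
	Name string `mapstructure:"name"`
}

// listServiceSummaries issues a JSON GET and decodes the body with the
// exported DecodeBodyMap helper introduced by this vendor update.
func listServiceSummaries(c *fastly.Client) ([]*serviceSummary, error) {
	// GetJSON sets the Accept header to the JSON MIME type and performs a GET;
	// passing nil for the request options is handled by the method itself.
	resp, err := c.GetJSON("/service", nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var out []*serviceSummary
	// DecodeBodyMap (previously the unexported decodeBodyMap) decodes the
	// JSON body into the target via its mapstructure tags.
	if err := fastly.DecodeBodyMap(resp.Body, &out); err != nil {
		return nil, err
	}
	return out, nil
}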
diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/diff.go b/vendor/github.com/fastly/go-fastly/v9/fastly/diff.go index 67e43117e..28b0d5043 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/diff.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/diff.go @@ -49,7 +49,7 @@ func (c *Client) GetDiff(i *GetDiffInput) (*Diff, error) { defer resp.Body.Close() var d *Diff - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/domain.go b/vendor/github.com/fastly/go-fastly/v9/fastly/domain.go index 4481ce577..3e1c59af2 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/domain.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/domain.go @@ -45,7 +45,7 @@ func (c *Client) ListDomains(i *ListDomainsInput) ([]*Domain, error) { defer resp.Body.Close() var ds []*Domain - if err := decodeBodyMap(resp.Body, &ds); err != nil { + if err := DecodeBodyMap(resp.Body, &ds); err != nil { return nil, err } return ds, nil @@ -81,7 +81,7 @@ func (c *Client) CreateDomain(i *CreateDomainInput) (*Domain, error) { defer resp.Body.Close() var d *Domain - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -118,7 +118,7 @@ func (c *Client) GetDomain(i *GetDomainInput) (*Domain, error) { defer resp.Body.Close() var d *Domain - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -159,7 +159,7 @@ func (c *Client) UpdateDomain(i *UpdateDomainInput) (*Domain, error) { defer resp.Body.Close() var d *Domain - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_create.go b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_create.go new file mode 100644 index 000000000..f809c6dad --- /dev/null +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_create.go @@ -0,0 +1,33 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/fastly/go-fastly/v9/fastly" +) + +// CreateInput specifies the information needed for the Create() function to +// perform the operation. +type CreateInput struct { + // FQDN is the fully-qualified domain name of the domain (required). + FQDN *string `json:"fqdn"` + // ServiceID is the service_id associated with the domain or nil if there + // is no association (optional) + ServiceID *string `json:"service_id"` +} + +// Create creates a new domain. 
+func Create(c *fastly.Client, i *CreateInput) (*Data, error) { + resp, err := c.PostJSON("/domains/v1", i, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var d *Data + if err := json.NewDecoder(resp.Body).Decode(&d); err != nil { + return nil, fmt.Errorf("failed to decode json response: %w", err) + } + return d, nil +} diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_delete.go b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_delete.go new file mode 100644 index 000000000..ce21e2801 --- /dev/null +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_delete.go @@ -0,0 +1,35 @@ +package v1 + +import ( + "net/http" + + "github.com/fastly/go-fastly/v9/fastly" +) + +// DeleteInput specifies the information needed for the Delete() function to +// perform the operation. +type DeleteInput struct { + // DomainID of the domain to delete (required). + DomainID *string +} + +// Delete deletes the specified domain. +func Delete(c *fastly.Client, i *DeleteInput) error { + if i.DomainID == nil { + return fastly.ErrMissingDomainID + } + + path := fastly.ToSafeURL("domains", "v1", *i.DomainID) + + resp, err := c.Delete(path, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + return fastly.NewHTTPError(resp) + } + + return nil +} diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_get.go b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_get.go new file mode 100644 index 000000000..2e5651eb8 --- /dev/null +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_get.go @@ -0,0 +1,37 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/fastly/go-fastly/v9/fastly" +) + +// GetInput specifies the information needed for the Get() function to perform +// the operation. +type GetInput struct { + // DomainID is the domain identifier (required). + DomainID *string +} + +// Get retrieves a specified domain. +func Get(c *fastly.Client, i *GetInput) (*Data, error) { + if i.DomainID == nil { + return nil, fastly.ErrMissingDomainID + } + + path := fastly.ToSafeURL("domains", "v1", *i.DomainID) + + resp, err := c.Get(path, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var d *Data + if err := json.NewDecoder(resp.Body).Decode(&d); err != nil { + return nil, fmt.Errorf("failed to decode json response: %w", err) + } + + return d, nil +} diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_list.go b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_list.go new file mode 100644 index 000000000..fdb81f14d --- /dev/null +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_list.go @@ -0,0 +1,61 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/fastly/go-fastly/v9/fastly" +) + +// ListInput specifies the information needed for the List() function to perform +// the operation. +type ListInput struct { + // Cursor is the cursor value from the next_cursor field of a previous + // response, used to retrieve the next page. To request the first page, this + // should be an empty string or nil. + Cursor *string + // FQDN filters results by the FQDN using a fuzzy/partial match (optional). + FQDN *string + // Limit is the maximum number of results to return (optional). + Limit *int + // ServiceID filter results based on a service_id (optional). + ServiceID *string + // Sort is the order in which to list the results (optional). 
+ Sort *string +} + +// List retrieves a list of domains, with optional filtering and pagination. +func List(c *fastly.Client, i *ListInput) (*Collection, error) { + ro := &fastly.RequestOptions{ + Params: map[string]string{}, + } + if i.Cursor != nil { + ro.Params["cursor"] = *i.Cursor + } + if i.Limit != nil { + ro.Params["limit"] = strconv.Itoa(*i.Limit) + } + if i.FQDN != nil { + ro.Params["fqdn"] = *i.FQDN + } + if i.ServiceID != nil { + ro.Params["service_id"] = *i.ServiceID + } + if i.Sort != nil { + ro.Params["sort"] = *i.Sort + } + + resp, err := c.Get("/domains/v1", ro) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var cl *Collection + if err := json.NewDecoder(resp.Body).Decode(&cl); err != nil { + return nil, fmt.Errorf("failed to decode json response: %w", err) + } + + return cl, nil +} diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_response.go b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_response.go new file mode 100644 index 000000000..ddda7bf74 --- /dev/null +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_response.go @@ -0,0 +1,43 @@ +package v1 + +import ( + "time" +) + +// Collection is the API response structure for the list operation. +type Collection struct { + // Data contains the API data. + Data []Data `json:"data"` + // Meta contains metadata related to paginating the full dataset. + Meta Meta `json:"meta"` +} + +// Data is a subset of the API response structure containing the specific API +// data itself. +type Data struct { + // CreatedAt is the date and time in ISO 8601 format. + CreatedAt time.Time `json:"created_at"` + // ID is the domain identifier (UUID). + DomainID string `json:"id"` + // FQDN is the fully-qualified domain name of the domain. Read-only + // after creation. + FQDN string `json:"fqdn"` + // ServiceID is the service_id associated with the domain or nil if there + // is no association. + ServiceID *string `json:"service_id"` + // UpdatedAt is the date and time in ISO 8601 format. + UpdatedAt time.Time `json:"updated_at"` +} + +// Meta is a subset of the API response structure containing metadata related to +// paginating the full dataset. +type Meta struct { + // Limit is how many results are included in this response. + Limit int `json:"limit"` + // NextCursor is the cursor value used to retrieve the next page. + NextCursor string `json:"next_cursor"` + // Sort is the field used to order the response by. + Sort string `json:"sort"` + // Total is the total number of results. + Total int `json:"total"` +} diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_update.go b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_update.go new file mode 100644 index 000000000..8730da707 --- /dev/null +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/api_update.go @@ -0,0 +1,39 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/fastly/go-fastly/v9/fastly" +) + +// UpdateInput specifies the information needed for the Update() function to +// perform the operation. +type UpdateInput struct { + // DomainID is the domain identifier (required). + DomainID *string `json:"-"` + // ServiceID is the service_id associated with the domain or nil if there + // is no association (optional) + ServiceID *string `json:"service_id"` +} + +// Update updates the specified domain. 
+func Update(c *fastly.Client, i *UpdateInput) (*Data, error) { + if i.DomainID == nil { + return nil, fastly.ErrMissingDomainID + } + + path := fastly.ToSafeURL("domains", "v1", *i.DomainID) + + resp, err := c.PatchJSON(path, i, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var d *Data + if err := json.NewDecoder(resp.Body).Decode(&d); err != nil { + return nil, fmt.Errorf("failed to decode json response: %w", err) + } + return d, nil +} diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/doc.go b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/doc.go new file mode 100644 index 000000000..e3dfd1d8f --- /dev/null +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/domains/v1/doc.go @@ -0,0 +1,3 @@ +// Package v1 contains functionality which offer various operations to enable, +// disable, and configure Fastly domains using its v1 implementation. +package v1 diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/edge_check.go b/vendor/github.com/fastly/go-fastly/v9/fastly/edge_check.go index 6e391224c..d82ccbdb4 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/edge_check.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/edge_check.go @@ -47,7 +47,7 @@ func (c *Client) EdgeCheck(i *EdgeCheckInput) ([]*EdgeCheck, error) { defer resp.Body.Close() var e []*EdgeCheck - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } return e, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/errors.go b/vendor/github.com/fastly/go-fastly/v9/fastly/errors.go index c622f9380..406a06936 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/errors.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/errors.go @@ -89,6 +89,14 @@ var ErrMissingCertBlob = NewFieldError("CertBlob") // requires a "CertBundle" key, but one was not set. var ErrMissingCertBundle = NewFieldError("CertBundle") +// ErrMissingComputeACLID is an error that is returned when an input struct +// requires a "ComputeACLID" key, but one was not set. +var ErrMissingComputeACLID = NewFieldError("ComputeACLID") + +// ErrMissingComputeACLIP is an error that is returned when an input struct +// requires a "ComputeACLIP" key, but one was not set. +var ErrMissingComputeACLIP = NewFieldError("ComputeACLIP") + // ErrMissingContent is an error that is returned when an input struct // requires a "Content" key, but one was not set. var ErrMissingContent = NewFieldError("Content") @@ -125,6 +133,10 @@ var ErrMissingTokenID = errors.New("missing required field 'TokenID'") // requires a "ID" key, but one was not set. var ErrMissingID = NewFieldError("ID") +// ErrMissingDomainID is an error that is returned when an input struct +// requires a "DomainID" key, but one was not set. +var ErrMissingDomainID = NewFieldError("DomainID") + // ErrMissingEntryID is an error that is returned when an input struct // requires a "EntryID" key, but one was not set. var ErrMissingEntryID = NewFieldError("EntryID") @@ -411,7 +423,7 @@ func NewHTTPError(resp *http.Response) *HTTPError { switch resp.Header.Get("Content-Type") { case jsonapi.MediaType: // If this is a jsonapi response, decode it accordingly. 
- if err := decodeBodyMap(body, &e); err != nil { + if err := DecodeBodyMap(body, &e); err != nil { addDecodeErr() } @@ -435,7 +447,7 @@ func NewHTTPError(resp *http.Response) *HTTPError { default: var lerr *legacyError - if err := decodeBodyMap(body, &lerr); err != nil { + if err := DecodeBodyMap(body, &lerr); err != nil { addDecodeErr() } else if lerr != nil { // This is for better handling the KV Store Bulk Insert endpoint. @@ -454,13 +466,21 @@ func NewHTTPError(resp *http.Response) *HTTPError { if r, ok := le["reason"]; ok { detail, _ = r.(string) } + if d, ok := le["detail"]; ok { + detail, _ = d.(string) + } + var title string if i, ok := le["index"]; ok { index, _ = i.(float64) + title = fmt.Sprintf("error at index: %v", index) + } + if t, ok := le["title"]; ok { + title, _ = t.(string) } e.Errors = append(e.Errors, &ErrorObject{ Code: code, Detail: detail, - Title: fmt.Sprintf("error at index: %v", index), + Title: title, }) } } else { @@ -521,12 +541,17 @@ func (e *HTTPError) Error() string { } // String implements the stringer interface and returns the string representing -// the string text that includes the status code and corresponding status text. +// the error text that includes the status code and corresponding status text. func (e *HTTPError) String() string { return e.Error() } -// IsNotFound returns true if the HTTP error code is a 404, false otherwise. +// IsBadRequest returns true if the HTTP status code is 400, false otherwise. +func (e *HTTPError) IsBadRequest() bool { + return e.StatusCode == 400 +} + +// IsNotFound returns true if the HTTP status code is 404, false otherwise. func (e *HTTPError) IsNotFound() bool { return e.StatusCode == 404 } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/fastly_test_utils.go b/vendor/github.com/fastly/go-fastly/v9/fastly/fastly_test_utils.go new file mode 100644 index 000000000..fedf9b532 --- /dev/null +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/fastly_test_utils.go @@ -0,0 +1,668 @@ +package fastly + +import ( + "fmt" + "os" + "sync" + "testing" + + "github.com/dnaeon/go-vcr/cassette" + "github.com/dnaeon/go-vcr/recorder" +) + +// TestClient is the test client. +var TestClient = DefaultClient() + +// TestStatsClient is the test client for realtime stats. +var TestStatsClient = NewRealtimeStatsClient() + +// ID of the Delivery service for testing. +var TestDeliveryServiceID = deliveryServiceIDForTest() + +// ID of the Compute service for testing. +var TestComputeServiceID = computeServiceIDForTest() + +// ID of the NGWAF workspace for testing. +var TestNGWAFWorkspaceID = ngwafWorkspaceIDForTest() + +// ID of the default Delivery service for testing. +var DefaultDeliveryTestServiceID = "kKJb5bOFI47uHeBVluGfX1" + +// ID of the default Compute service for testing. +var defaultComputeTestServiceID = "XsjdElScZGjmfCcTwsYRC1" + +// ID of the default NGWAF workspace for testing. +var defaultNGWAFWorkspaceID = "alk6DTsYKHKucJCOIavaJM" + +const ( + // ServiceTypeVCL is the type for VCL services. + ServiceTypeVCL = "vcl" + // ServiceTypeWasm is the type for Wasm services. + ServiceTypeWasm = "wasm" +) + +// testVersionLock is a lock around version creation because the Fastly API +// kinda dies on concurrent requests to create a version. 
+var testVersionLock sync.Mutex + +func deliveryServiceIDForTest() string { + if tsid := os.Getenv("FASTLY_TEST_DELIVERY_SERVICE_ID"); tsid != "" { + return tsid + } + + return DefaultDeliveryTestServiceID +} + +func computeServiceIDForTest() string { + if tsid := os.Getenv("FASTLY_TEST_COMPUTE_SERVICE_ID"); tsid != "" { + return tsid + } + + return defaultComputeTestServiceID +} + +func ngwafWorkspaceIDForTest() string { + if tsid := os.Getenv("FASTLY_TEST_NGWAF_WORKSPACE_ID"); tsid != "" { + return tsid + } + return defaultNGWAFWorkspaceID +} + +func vcrDisabled() bool { + vcrDisable := os.Getenv("VCR_DISABLE") + + return vcrDisable != "" +} + +func Record(t *testing.T, fixture string, f func(*Client)) { + client := DefaultClient() + + if vcrDisabled() { + f(client) + } else { + r := getRecorder(t, fixture) + defer stopRecorder(t, r) + client.HTTPClient.Transport = r + f(client) + } +} + +func RecordIgnoreBody(t *testing.T, fixture string, f func(*Client)) { + client := DefaultClient() + + if vcrDisabled() { + f(client) + } else { + r := getRecorder(t, fixture) + defer stopRecorder(t, r) + + r.AddFilter(func(i *cassette.Interaction) error { + i.Request.Body = "" + return nil + }) + + client.HTTPClient.Transport = r + f(client) + } +} + +func getRecorder(t *testing.T, fixture string) *recorder.Recorder { + r, err := recorder.New("fixtures/" + fixture) + if err != nil { + t.Fatal(err) + } + + // Add a filter which removes Fastly-Key header from all recorded requests. + r.AddFilter(func(i *cassette.Interaction) error { + delete(i.Request.Headers, "Fastly-Key") + return nil + }) + + return r +} + +func stopRecorder(t *testing.T, r *recorder.Recorder) { + if err := r.Stop(); err != nil { + t.Fatal(err) + } +} + +func RecordRealtimeStats(t *testing.T, fixture string, f func(*RTSClient)) { + r, err := recorder.New("fixtures/" + fixture) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := r.Stop(); err != nil { + t.Fatal(err) + } + }() + + client := NewRealtimeStatsClient() + client.client.HTTPClient.Transport = r + + f(client) +} + +func createTestService(t *testing.T, serviceFixture, serviceNameSuffix string) *Service { + var err error + var service *Service + + Record(t, serviceFixture, func(client *Client) { + service, err = client.CreateService(&CreateServiceInput{ + Name: ToPointer(fmt.Sprintf("test_service_%s", serviceNameSuffix)), + Comment: ToPointer("go-fastly client test"), + Type: ToPointer(ServiceTypeVCL), + }) + }) + if err != nil { + t.Fatal(err) + } + + return service +} + +func createTestServiceWasm(t *testing.T, serviceFixture, serviceNameSuffix string) *Service { + var err error + var service *Service + + Record(t, serviceFixture, func(client *Client) { + service, err = client.CreateService(&CreateServiceInput{ + Name: ToPointer(fmt.Sprintf("test_service_wasm_%s", serviceNameSuffix)), + Comment: ToPointer("go-fastly wasm client test"), + Type: ToPointer(ServiceTypeWasm), + }) + }) + if err != nil { + t.Fatal(err) + } + + return service +} + +// testVersion is a new, blank version suitable for testing. 
+func testVersion(t *testing.T, c *Client) *Version { + testVersionLock.Lock() + defer testVersionLock.Unlock() + + v, err := c.CreateVersion(&CreateVersionInput{ + ServiceID: TestDeliveryServiceID, + }) + if err != nil { + t.Fatal(err) + } + return v +} + +func CreateTestVersion(t *testing.T, versionFixture, serviceID string) *Version { + var err error + var version *Version + + Record(t, versionFixture, func(client *Client) { + testVersionLock.Lock() + defer testVersionLock.Unlock() + + version, err = client.CreateVersion(&CreateVersionInput{ + ServiceID: serviceID, + }) + if err != nil { + t.Fatal(err) + } + }) + + return version +} + +func createTestDictionary(t *testing.T, dictionaryFixture, serviceID string, version int, dictionaryNameSuffix string) *Dictionary { + var err error + var dictionary *Dictionary + + Record(t, dictionaryFixture, func(client *Client) { + dictionary, err = client.CreateDictionary(&CreateDictionaryInput{ + ServiceID: serviceID, + ServiceVersion: version, + Name: ToPointer(fmt.Sprintf("test_dictionary_%s", dictionaryNameSuffix)), + }) + }) + if err != nil { + t.Fatal(err) + } + return dictionary +} + +func deleteTestDictionary(t *testing.T, dictionary *Dictionary, deleteFixture string) { + var err error + + Record(t, deleteFixture, func(client *Client) { + err = client.DeleteDictionary(&DeleteDictionaryInput{ + ServiceID: *dictionary.ServiceID, + ServiceVersion: *dictionary.ServiceVersion, + Name: *dictionary.Name, + }) + }) + if err != nil { + t.Fatal(err) + } +} + +func createTestACL(t *testing.T, createFixture, serviceID string, version int, aclNameSuffix string) *ACL { + var err error + var acl *ACL + + Record(t, createFixture, func(client *Client) { + acl, err = client.CreateACL(&CreateACLInput{ + ServiceID: serviceID, + ServiceVersion: version, + Name: ToPointer(fmt.Sprintf("test_acl_%s", aclNameSuffix)), + }) + }) + if err != nil { + t.Fatal(err) + } + return acl +} + +func deleteTestACL(t *testing.T, acl *ACL, deleteFixture string) { + var err error + + Record(t, deleteFixture, func(client *Client) { + err = client.DeleteACL(&DeleteACLInput{ + ServiceID: *acl.ServiceID, + ServiceVersion: *acl.ServiceVersion, + Name: *acl.Name, + }) + }) + if err != nil { + t.Fatal(err) + } +} + +func createTestPool(t *testing.T, createFixture, serviceID string, version int, poolNameSuffix string) *Pool { + var err error + var pool *Pool + + Record(t, createFixture, func(client *Client) { + pool, err = client.CreatePool(&CreatePoolInput{ + ServiceID: serviceID, + ServiceVersion: version, + Name: ToPointer(fmt.Sprintf("test_pool_%s", poolNameSuffix)), + }) + }) + if err != nil { + t.Fatal(err) + } + return pool +} + +func createTestLogging(t *testing.T, fixture, serviceID string, serviceNumber int) { + var err error + + Record(t, fixture, func(c *Client) { + _, err = c.CreateSyslog(&CreateSyslogInput{ + ServiceID: serviceID, + ServiceVersion: serviceNumber, + Name: ToPointer("test-syslog"), + Address: ToPointer("example.com"), + Hostname: ToPointer("example.com"), + Port: ToPointer(1234), + Token: ToPointer("abcd1234"), + Format: ToPointer("format"), + FormatVersion: ToPointer(2), + MessageType: ToPointer("classic"), + }) + }) + if err != nil { + t.Fatal(err) + } +} + +func deleteTestPool(t *testing.T, pool *Pool, deleteFixture string) { + var err error + + Record(t, deleteFixture, func(client *Client) { + err = client.DeletePool(&DeletePoolInput{ + ServiceID: *pool.ServiceID, + ServiceVersion: *pool.ServiceVersion, + Name: *pool.Name, + }) + }) + if err != nil { + 
t.Fatal(err) + } +} + +func deleteTestLogging(t *testing.T, fixture, serviceID string, serviceNumber int) { + var err error + + Record(t, fixture, func(c *Client) { + err = c.DeleteSyslog(&DeleteSyslogInput{ + ServiceID: serviceID, + ServiceVersion: serviceNumber, + Name: "test-syslog", + }) + }) + if err != nil { + t.Fatal(err) + } +} + +func createTestWAFCondition(t *testing.T, fixture, serviceID, name string, serviceNumber int) *Condition { + var err error + var condition *Condition + + Record(t, fixture, func(c *Client) { + condition, err = c.CreateCondition(&CreateConditionInput{ + ServiceID: serviceID, + ServiceVersion: serviceNumber, + Name: ToPointer(name), + Statement: ToPointer("req.url~+\"index.html\""), + Type: ToPointer("PREFETCH"), // This must be a prefetch condition + Priority: ToPointer(1), + }) + }) + if err != nil { + t.Fatal(err) + } + return condition +} + +func deleteTestCondition(t *testing.T, fixture, serviceID, name string, serviceNumber int) { + var err error + + Record(t, fixture, func(c *Client) { + err = c.DeleteCondition(&DeleteConditionInput{ + ServiceID: serviceID, + ServiceVersion: serviceNumber, + Name: name, + }) + }) + if err != nil { + t.Fatal(err) + } +} + +func createTestWAFResponseObject(t *testing.T, fixture, serviceID, name string, serviceNumber int) *ResponseObject { + var err error + var ro *ResponseObject + + Record(t, fixture, func(c *Client) { + ro, err = c.CreateResponseObject(&CreateResponseObjectInput{ + ServiceID: serviceID, + ServiceVersion: serviceNumber, + Name: ToPointer(name), + Status: ToPointer(403), + }) + }) + if err != nil { + t.Fatal(err) + } + return ro +} + +func deleteTestResponseObject(t *testing.T, fixture, serviceID, name string, serviceNumber int) { + var err error + + Record(t, fixture, func(c *Client) { + err = c.DeleteResponseObject(&DeleteResponseObjectInput{ + ServiceID: serviceID, + ServiceVersion: serviceNumber, + Name: name, + }) + }) + if err != nil { + t.Fatal(err) + } +} + +func createWAF(t *testing.T, fixture, serviceID, condition, response string, serviceNumber int) *WAF { + var err error + var waf *WAF + + Record(t, fixture, func(c *Client) { + waf, err = c.CreateWAF(&CreateWAFInput{ + ServiceID: serviceID, + ServiceVersion: serviceNumber, + PrefetchCondition: condition, + Response: response, + }) + }) + if err != nil { + t.Fatal(err) + } + return waf +} + +func deleteWAF(t *testing.T, fixture, wafID string) { + var err error + + Record(t, fixture, func(c *Client) { + err = c.DeleteWAF(&DeleteWAFInput{ + ID: wafID, + ServiceVersion: 1, + }) + }) + if err != nil { + t.Fatal(err) + } +} + +func deleteTestService(t *testing.T, cleanupFixture, serviceID string) { + var err error + + Record(t, cleanupFixture, func(client *Client) { + err = client.DeleteService(&DeleteServiceInput{ + ServiceID: serviceID, + }) + }) + if err != nil { + t.Fatal(err) + } +} + +// pgpPublicKey returns a PEM encoded PGP public key suitable for testing. 
+func pgpPublicKey() string { + return `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFyUD8sBCACyFnB39AuuTygseek+eA4fo0cgwva6/FSjnWq7riouQee8GgQ/ +ibXTRyv4iVlwI12GswvMTIy7zNvs1R54i0qvsLr+IZ4GVGJqs6ZJnvQcqe3xPoR4 +8AnBfw90o32r/LuHf6QCJXi+AEu35koNlNAvLJ2B+KACaNB7N0EeWmqpV/1V2k9p +lDYk+th7LcCuaFNGqKS/PrMnnMqR6VDLCjHhNx4KR79b0Twm/2qp6an3hyNRu8Gn +dwxpf1/BUu3JWf+LqkN4Y3mbOmSUL3MaJNvyQguUzTfS0P0uGuBDHrJCVkMZCzDB +89ag55jCPHyGeHBTd02gHMWzsg3WMBWvCsrzABEBAAG0JXRlcnJhZm9ybSAodGVz +dCkgPHRlc3RAdGVycmFmb3JtLmNvbT6JAU4EEwEIADgWIQSHYyc6Kj9l6HzQsau6 +vFFc9jxV/wUCXJQPywIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRC6vFFc +9jxV/815CAClb32OxV7wG01yF97TzlyTl8TnvjMtoG29Mw4nSyg+mjM3b8N7iXm9 +OLX59fbDAWtBSldSZE22RXd3CvlFOG/EnKBXSjBtEqfyxYSnyOPkMPBYWGL/ApkX +SvPYJ4LKdvipYToKFh3y9kk2gk1DcDBDyaaHvR+3rv1u3aoy7/s2EltAfDS3ZQIq +7/cWTLJml/lleeB/Y6rPj8xqeCYhE5ahw9gsV/Mdqatl24V9Tks30iijx0Hhw+Gx +kATUikMGr2GDVqoIRga5kXI7CzYff4rkc0Twn47fMHHHe/KY9M2yVnMHUXmAZwbG +M1cMI/NH1DjevCKdGBLcRJlhuLPKF/anuQENBFyUD8sBCADIpd7r7GuPd6n/Ikxe +u6h7umV6IIPoAm88xCYpTbSZiaK30Svh6Ywra9jfE2KlU9o6Y/art8ip0VJ3m07L +4RSfSpnzqgSwdjSq5hNour2Fo/BzYhK7yaz2AzVSbe33R0+RYhb4b/6N+bKbjwGF +ftCsqVFMH+PyvYkLbvxyQrHlA9woAZaNThI1ztO5rGSnGUR8xt84eup28WIFKg0K +UEGUcTzz+8QGAwAra+0ewPXo/AkO+8BvZjDidP417u6gpBHOJ9qYIcO9FxHeqFyu +YrjlrxowEgXn5wO8xuNz6Vu1vhHGDHGDsRbZF8pv1d5O+0F1G7ttZ2GRRgVBZPwi +kiyRABEBAAGJATYEGAEIACAWIQSHYyc6Kj9l6HzQsau6vFFc9jxV/wUCXJQPywIb +DAAKCRC6vFFc9jxV/9YOCACe8qmOSnKQpQfW+PqYOqo3dt7JyweTs3FkD6NT8Zml +dYy/vkstbTjPpX6aTvUZjkb46BVi7AOneVHpD5GBqvRsZ9iVgDYHaehmLCdKiG5L +3Tp90NN+QY5WDbsGmsyk6+6ZMYejb4qYfweQeduOj27aavCJdLkCYMoRKfcFYI8c +FaNmEfKKy/r1PO20NXEG6t9t05K/frHy6ZG8bCNYdpagfFVot47r9JaQqWlTNtIR +5+zkkSq/eG9BEtRij3a6cTdQbktdBzx2KBeI0PYc1vlZR0LpuFKZqY9vlE6vTGLR +wMfrTEOvx0NxUM3rpaCgEmuWbB1G1Hu371oyr4srrr+N +=28dr +-----END PGP PUBLIC KEY BLOCK----- +` +} + +// pgpPublicKeyUpdate returns a PEM encoded PGP public key suitable for testing +// updates to the public key field. 
+func pgpPublicKeyUpdate() string { + return ` +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: OpenPGP.js v3.0.9 +Comment: https://openpgpjs.org + +xsFNBF7XxDYBEACwKFfEjdDsm0lmbdSEM6/RTiUNldEQIpbxIN0pK1z38Rhh +u9VSmO0Jf/x3xTyAvFyc+D5vtxUBp0DlwILVMeTCZ6KX0Yk05hQWTqSMWk6t +QaGXtncvmD9xaHodjuQJ9xJo+lKRhZlli9QnF/x3EcIPSb9fv8BpQRw7M/Kg +O32vkiEV6GvAcZetgLEOy6EkFVx/Y1/kq5crP+gwFxseVcNsH8bB0tMY2maz +TwIKzReyCSjSZn6t+TwdCFzCR6x3cLRegPRqSmpTurL+YtADrHse9hFlPmA0 +nHJFTeF96pZQr9N7Hdm/fep1Lb3fEoVgmNyNtsEgZpu8XGB+2deo9A5C9ai+ +mkSxkIqv0UDyTuJd4M7Uvt9N1LlWYxeDv2kFo1MDqZF/9LwHAhNk+tpp3zNp +68G5OX/TL5MQoXHlkXmoTBh38sby8IeTIPgS3UhIRUj6a1+6BhoSEAIlGOog +fPkG5ED/WK3aAUcqf8jIJWA01qW/GtwKkx4QTzDN4xYWXtgKrOyaPbpyCDFe +RpX9idrDeoXxA+q822WWJ9P+fIZyxoPi9dyRnysC+1fLTQtw97940KnuYC0N +yQRyn4s24gec9P1BVPS2/1HlyJZxTg3eJCtbPnyT1WbHW4P23hIQ3uChc2/x +h6ywc36ppl7zZPtcRhRnRRrBl/7UECAzC3oewwARAQABzRVmb28gPGJhckBl +eGFtcGxlLmNvbT7CwXUEEAEIACkFAl7XxDYGCwkHCAMCCRCyVpbnvRl/KAQV +CAoCAxYCAQIZAQIbAwIeAQAAWwoP/0abBY3HpYbUVFGHDqDHZ7sTHPD8m6eD +Za6lOI85ARie8Wb39a+KhoLxzo8jInizjCqXgXGLy8tL0sZA5s/K52k9dTPx +lwEjaldH8EeEgHBc9FgUK65MbJMEGXXZRKXescvb5vMu5sroHkC0kDY5sPEP ++rUk4PrgJ0VoOvBulXW/NCaOvaocUuYk5xENp9lUeHSSdDZKh2GJ5Ewbd5uG +BRUhby/QBAd7RAjgJDhC8l//RY9J1QjO693b/WBSJUrgvf8AgO24C7K5r1YE +P64hdSPWQO5ErDPPDqfdUVEWfyZh5flYdwJjk0hS5AGSSjdkKg/mxucb3DXJ +KXWY6In1Ws6Zu3TUqHW714Y0gKOmWYUaRjwvDCZ3+4xJ3FhCriL6IPAex9Dl +lIm7BgylkD2Q+KEm9pDUxTI35UBrmZ1Q89I/EjjUqd1qeoZ88p4YSC+9/RPA +9G03tLCjXvs9G224gxfMas2lhz6gsQIBZ77HDYwfbCAcWgLCD3S3KspfYqs+ +M2JTnK5V8+FdRBYJvOj9q+qg9YUTtXYZ6+cEhG/I+6HZt2V6GIOa0vxzJTCk +ITiJqpitX6II0qkh0XpqWMFzoflTEVJxPJBYyHjPDK35pZkoEfvYBBAwoVF8 +M6HiP9Eh3xBZbxRToJvAX3frwydyHJoxrZ0gx1/ht8LP3xsPlrP5zsFNBF7X +xDYBEAC4DHAZOhrp2wa8/v1p+uWdE9egUkscCkmFQUNzde5eIuFoGGv5ggi4 +EPrj9JxO0S0J+UVCGL4rr0WxbalOSGRe1pgiBK2AIcYtMkESfRQQ7E0H1gCq +TvcimJ9U+NdkFrZfcIu0I/0Pf5Ml+afhl9FXecEwTPG8Ff06nu62PjhABwsx +8wnNhzjXM+xv74ElalccYV8TqWkdJCSc+tKcHXPT0VClWzdwwyfqgozP+5qQ +1Cj9OFGki3KMOn4kD9cX4zS6oC+hj+TIQzh3HIKSLsTDLLg+U0X8+qtYuX3I +2Fi/YLjpIlT/oYlHJlxozbAedLl9NPXmqmxnTEWhsqW1LmjLPwiCxP8plh2b +19QUEmQI48yyp6oWbiBnx0aeXLkSv/LwZ5UJJS+ia8aaN5WiipXZKXgvurS0 +2+STGTeD1vEjoQCAKzk35gaz2cTfVZsFcG3ISgccQO3eGE/6+pl0ZpCw2OIs +WOQpnkuAaUkl7fMpLV/D1l660Hz8YmimKppxEow28vXinPt/nOUVjaCOeevJ +IGbjYUBFChu9Oykxg/ZaLRM7OTCe95+TCawcuuJnIXtVekisOE7lRUj/yhx4 +cgfVuHtUxg7s+ssmkizV+T9bCpJj3UnUwaVpxPXjGfM1mn+X98xmFMh4zj9U +UESh9N7/EtnwAynAbsdfT/kT+McV3QARAQABwsFfBBgBCAATBQJe18Q2CRCy +VpbnvRl/KAIbDAAA5IwP+QEO6IOJLhr+XmDWLU+FSptWsuMNvWUiHq7wTfD2 +70GmBFbdGX5QHtyg4CrbdnfAfjeifhqZYtLNDjLSEDeQDAPX5l+NnUEq6Luh ++ljFdaEFirVA2OCesK1zZ9utF4ZegI9Zukn92OSFYqmEKhhAIH3MaRhZZxQq +3cyXkw0fg2pPkn6PKcDSyLE7RwQMX6+P8X6u/t6kOcWyBJxK4OuJCB2sjFQo +Nphey2TwKv2swJC4mxbHL0S771HelGN+Ske64G5fAxrIEBc+drKtNHlnCZ30 +0NRVPzkJGD6dM9WgMyfm/d80YAsnckHGy6WuZ0mQrKiUOoN4vib+tEGnmd+r +K/3afuGq/Shc0tiUFzEKGAUbzaSeWKO6ZgYYGnpxryr1/AEr5z3sPERk3Dm0 ++jJ8wIrBBAxUDtuU2TxFRZvvFihrsvMFjBEzEjWNfJRTL7vCBRqR5Qem95tj +fofwzzgSSgf0mlHmyCvhY2Vcr0ozX9oucPJX90cBoWkRqLNCntOuSZSeFUys +IKGV3nYnpXB+VNYozayIoAhP/yVLO7DTibYbzpHgDe3fBG6NKBkQBenHCHpi +BA/4Ob0YNQ6SmKTFS4fIKbMMCnCVgLhMOSyeebVbAuvhP7V6lFdHC9agRXKh +4P1Y4fC/E3SHa+YSCvO4Usl7BoZnKjSULjaeLqeXnlIM +=Wto9 +-----END PGP PUBLIC KEY BLOCK----- +` +} + +// privatekey returns a ASN.1 DER encoded key suitable for testing. 
+func privateKey() string { + return `-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCd4jPcvMlmvT/j +EVY/SY/q6TRgw60tc9pJe0oAwWYrBWAh3HLy3589dDglpCOH1FngG7INkCWfItRH +RQ7Vp6oT18qlLB0WUQCPdro73+IPa+yA9DBDX1SjiGO8nt2qYR1BFuZQJJCWntdk +HMco02623xNJEF6QR2GqhT0WbAk12TjmX0rhFcXK0STI5bdSfLYZxhpmmt8h+qNc +reoUHU6fSTc83lMFnu/D2gJrPEWi3Gg1wu37IAciPI/XKCjpbkHYp2MZASwBzKaO +8ekLjmAN6ILmVwFKTFyTCQkA9jXdFi99w8uFx3D64cPpXwlVuxNbG1jtymtWVXrt +BRBdHqzigJn0JNnqDCc0faisJpGzNq2KuaqzdfWuUXbccaL+MzrjsryOm9VM+T2o +zdXcl87iiJjlxZohC+8pAvJMQ7vBwPdKQtlSt1dJserbEfx+szASINo3udZyf9dV +QpiIEuf/o7KNYfqFLahwLFotf+bvJa0MzAtwkd1SixgloXxezaUPNg2C5wYetLfx +OJmNFl+xgwGPEEzCneHZ5ilOnZymA812UdYXtXNPPujV/qXcycYofEPxBtD5DTZW +tDGmzA7Iu3hTFAo0jzlBvfbxljzbzKj/xLwpglu1SpqYeDUjR48JMU0zkA/2Rl/S +KUFmZAscgiDMQItYQoLtMykfvlPuwQIDAQABAoICAF0M8SX6efS8Owf3ss4v68s2 +UHFrQgiUzCUcrZvOYAmg7GxogbLUywQsF99PYsVuCN5FVGYb+6BTpaqvb7PKUjnJ +p5w7aJU7fkoPXmllZNVT9Rp3UG6Uo8yR2L5VHy2IePZgqbK4KiMrUKSnNVXBbvIG +fVZFeIYuG8ilKECrwa3j7V4Q8Y/BBkanhreEc8wAxk5gbDTmt/VNw7Qep+Pc9fZ4 +7z5HhcS9THAwb9aFukDnB+APl7S2xp2N9fSHrb0OB27KEGSvRSF2XP/IYWI3MjNg +Qq3Av3jrkm/yFkVj1pELv0eu+qdIyTSDlLRZF6ZYUGsUrg/Pif1i+cTxhBhtuNQE +litIfxBiMf3Hyx8GTXWJACKFQY3r2zzDu2Nx7dprzcss3aJhHOtRie/BYLe4i5fP +88VYuEwKWo1LJVBq4GyZcvhehHxVlJTb3SdfnsicSUzEhuTZl/2lhswSZQfhJ34C +bFHSgR3QHwpbUJSm5qJ/4Uz6MqPyPD5bQKdKzuFpRaMQ3x/+S28hXtzzvD/alGrV +cNKEC6Bq8q1Vy/4KDqyhq17FVh29FbU/TzJSAPzEW8usfydCLox9namPMjOMz5LW +gYKR8FHABwyWsDDOTsWQtfZ7Gpjb+3RdPyZ/iTRME/Blu0wvuGgC2YSy315z/9I0 +AE0C3gIjqFoGk3cP4A7VAoIBAQDMf+0potwuNQeZRZuTATyxn5qawwZ7b58rHwPw +NMtO/FNU8Vkc4/vXi5guRBCbB/u3nNBieulp3EJ217NfE3AGhe9zvY+ZT63YcVv2 +gT6BiBZZ+yzPsNbT3vhnOuSOZA7m+z8JzM5QhDR0LRYwnlIFf948GiAg4SAYG2+N +QWKtZqg559QfW41APBmw9RtZ0hPFBv6pQsvF0t1INc7oVbwX5xNwaKdzMvG2za9d +cKpXQrJtpaTF12x59RnmhzML1gzpZ1LWVSSXt1fgMxdzWRa/IcV+TLdF3+ikL7st +LcrqCZ4INeJalcXSA6mOV61yOwxAzrw1dkZ9qZV0YaW0DzM7AoIBAQDFpPDcHW6I +PTB3SXFYudCpbh/OLXBndSkk80YZ71VJIb8KtWN2KKZbGqnWOeJ17M3Hh5B0xjNT +y5L+AXsL+0G8deOtWORDPSpWm6Q7OJmJY67vVh9U7dA70VPUGdqljy4a1fAwzZNU +mI4gpqwWjCl3c/6c/R4QY85YgkdAgoLPIc0LJr58MTx8zT4oaY8IXf4Sa2xO5kAa +rk4CoDHZw97N6LP8v4fEMZiqQZ8Mqa0UbX8ORlyF3aKGh0QaAAn9j7aJpEwgcjWh +aBnGI2b7JTofqJIsSbvvFOnNHt1hnkncm7fVXRvcgguHeJ1pVGiSs5h6aMvJ7IiW +mnXBrBzgho4zAoIBAQDC0gC70MaYUrbpgxHia6RJx7Z/R9rOD5oAd6zF01X46pPs +8Xym9F9BimCxevCi8WkSFJfFqjjiPA8prvbYVek8na5wgh/iu7Dv6Zbl8Vz+BArf +MFYRivQuplXZ6pZBPPuhe6wjhvTqafia0TU5niqfyKCMe4suJ6rurHyKgsciURFl +EQHZ2dtoXZlQJ0ImQOfKpY5I7DS7QtbC61gxqTPnRaIUTe9w5RC3yZ4Ok74EIatg +oBSo0kEqsqE5KIYt+X8VgPS+8iBJVUandaUao73y2paOa0GSlOzKNhrIwL52VjEy +uzrod5UdLZYD4G2BzNUwjINrH0Gqh7u1Qy2cq3pvAoIBACbXDhpDkmglljOq9CJa +ib3yDUAIP/Gk3YwMXrdUCC+R+SgSk1QyEtcOe1fFElLYUWwnoOTB2m5aMC3IfrTR +EI8Hn9F+CYWJLJvOhEy7B7kvJL6V7xxSi7xlm5Kv7f7hD09owYXlsFFMlYmnF2Rq +8O8vlVami1TvOCq+l1//BdPMsa3CVGa1ikyATPnGHLypM/fMsoEi0HAt1ti/QGyq +CEvwsgY2YWjV0kmLEcV8Rq4gAnr8qswHzRug02pEnbH9nwKXjfpGV3G7smz0ohUy +sKRuDSO07cDDHFsZ+KlpYNyAoXTFkmcYC0n5Ev4S/2Xs80cC9yFcYU8vVXrU5uvc +pW8CggEBAKblNJAibR6wAUHNzHOGs3EDZB+w7h+1aFlDyAXJkBVspP5m62AmHEaN +Ja00jDulaNq1Xp3bQI0DnNtoly0ihjskawSgKXsKI+E79eK7kPeYEZ4qN26v6rDg +KCMF8357GjjP7QpI79GwhDyXUwFns3W5stgHaBprhjBAQKQNuqCjrYHpem4EZlNT +5fwhCP/G9BcvHw4cT/vt+jG24W5JFGnLNxtsdJIPsqQJQymIqISEdQgGk5/ppgla +VtFHIUtevjK72l8AAO0VRwrtAriILixPuTKM1nFj/lCG5hbFN+/xm1CXLyVCumkV +ImXgKS5UmJB53s9yiomen/n7cUXvrAk= +-----END PRIVATE KEY----- +` +} + +// certificate returns a ASN.1 DER encoded certificate for the private key suitable for testing. 
+func certificate() string { + return `-----BEGIN CERTIFICATE----- +MIIE6DCCAtACCQCzHO2a8qU6KzANBgkqhkiG9w0BAQsFADA2MRIwEAYDVQQDDAls +b2NhbGhvc3QxIDAeBgNVBAoMF0NsaWVudCBDZXJ0aWZpY2F0ZSBEZW1vMB4XDTE5 +MTIwNTE3MjY1N1oXDTIwMTIwNDE3MjY1N1owNjESMBAGA1UEAwwJbG9jYWxob3N0 +MSAwHgYDVQQKDBdDbGllbnQgQ2VydGlmaWNhdGUgRGVtbzCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAJ3iM9y8yWa9P+MRVj9Jj+rpNGDDrS1z2kl7SgDB +ZisFYCHccvLfnz10OCWkI4fUWeAbsg2QJZ8i1EdFDtWnqhPXyqUsHRZRAI92ujvf +4g9r7ID0MENfVKOIY7ye3aphHUEW5lAkkJae12QcxyjTbrbfE0kQXpBHYaqFPRZs +CTXZOOZfSuEVxcrRJMjlt1J8thnGGmaa3yH6o1yt6hQdTp9JNzzeUwWe78PaAms8 +RaLcaDXC7fsgByI8j9coKOluQdinYxkBLAHMpo7x6QuOYA3oguZXAUpMXJMJCQD2 +Nd0WL33Dy4XHcPrhw+lfCVW7E1sbWO3Ka1ZVeu0FEF0erOKAmfQk2eoMJzR9qKwm +kbM2rYq5qrN19a5Rdtxxov4zOuOyvI6b1Uz5PajN1dyXzuKImOXFmiEL7ykC8kxD +u8HA90pC2VK3V0mx6tsR/H6zMBIg2je51nJ/11VCmIgS5/+jso1h+oUtqHAsWi1/ +5u8lrQzMC3CR3VKLGCWhfF7NpQ82DYLnBh60t/E4mY0WX7GDAY8QTMKd4dnmKU6d +nKYDzXZR1he1c08+6NX+pdzJxih8Q/EG0PkNNla0MabMDsi7eFMUCjSPOUG99vGW +PNvMqP/EvCmCW7VKmph4NSNHjwkxTTOQD/ZGX9IpQWZkCxyCIMxAi1hCgu0zKR++ +U+7BAgMBAAEwDQYJKoZIhvcNAQELBQADggIBAC8av9I4ezwlmM7ysaJvC1IfCzNN +VawIK1U7bfj9Oyjl49Bn/yTwbbiQ8j5VjOza4umIwnYp1HP6/mlBO+ey8WFYPmDM +JAspk6sYEQW7MrbZ9QOmq24YAkwMzgK1hDASCKq4GJCzGDym3Zx6fvPnMCPdei2c +jgtjzzBmyewE0zcegOHDrFXTaUIfoSbduTbV9zClJ/pJDkTklRX1cYBtIox77gpZ +1cnIC803gi1rVCHRNdq85ltOTjoF1/wVamLy5c6CYlp5IPyVOm0nqbqra47QIwss +QSGxn5l52BC1jP1l3eK1mEr64+dbMhqX3ZQwhfuiQ9VmdovNN1NarPWfmQia6Spq +TfxN+3VhloKFUh+fgwNzWYLKCMnwBuPVhVGcpclvrj50MsyeiT2IfE8pqWw26g6g +0xu85AbqYKePaZ7wPoDddbwCIGr6BBT87Nsu+AqtnkH3uw34FDDcjWR1CmNuI1mP +ac6d1jdfbkL5ZUJTpTJi0BxWbTGupv8VzufteFRNa7U2h1O6+kyPmEpA3heEZcEO +Hq5zIfW6QTUmBXDfMFzQ9h0764oBVwm29bjZ59bU3RhcAZtL8fi5BapNtoKAy55d +P/0WahbwNjP68QYVLBeK9Sfo0XxLU0hJP4RJUZSXy9kUuZ8xhAM/6PdE04cDq71v +Zfq6/HA3phy85qyj +-----END CERTIFICATE----- +` +} + +// caCert returns a CA certificate suitable for testing. 
+func caCert() string { + return `-----BEGIN CERTIFICATE----- +MIICUTCCAfugAwIBAgIBADANBgkqhkiG9w0BAQQFADBXMQswCQYDVQQGEwJDTjEL +MAkGA1UECBMCUE4xCzAJBgNVBAcTAkNOMQswCQYDVQQKEwJPTjELMAkGA1UECxMC +VU4xFDASBgNVBAMTC0hlcm9uZyBZYW5nMB4XDTA1MDcxNTIxMTk0N1oXDTA1MDgx +NDIxMTk0N1owVzELMAkGA1UEBhMCQ04xCzAJBgNVBAgTAlBOMQswCQYDVQQHEwJD +TjELMAkGA1UEChMCT04xCzAJBgNVBAsTAlVOMRQwEgYDVQQDEwtIZXJvbmcgWWFu +ZzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQCp5hnG7ogBhtlynpOS21cBewKE/B7j +V14qeyslnr26xZUsSVko36ZnhiaO/zbMOoRcKK9vEcgMtcLFuQTWDl3RAgMBAAGj +gbEwga4wHQYDVR0OBBYEFFXI70krXeQDxZgbaCQoR4jUDncEMH8GA1UdIwR4MHaA +FFXI70krXeQDxZgbaCQoR4jUDncEoVukWTBXMQswCQYDVQQGEwJDTjELMAkGA1UE +CBMCUE4xCzAJBgNVBAcTAkNOMQswCQYDVQQKEwJPTjELMAkGA1UECxMCVU4xFDAS +BgNVBAMTC0hlcm9uZyBZYW5nggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEE +BQADQQA/ugzBrjjK9jcWnDVfGHlk3icNRq0oV7Ri32z/+HQX67aRfgZu7KWdI+Ju +Wm7DCfrPNGVwFWUQOmsPue9rZBgO +-----END CERTIFICATE----- +` +} diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/health_check.go b/vendor/github.com/fastly/go-fastly/v9/fastly/health_check.go index 12550fa98..f7fef4f3a 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/health_check.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/health_check.go @@ -53,7 +53,7 @@ func (c *Client) ListHealthChecks(i *ListHealthChecksInput) ([]*HealthCheck, err defer resp.Body.Close() var hcs []*HealthCheck - if err := decodeBodyMap(resp.Body, &hcs); err != nil { + if err := DecodeBodyMap(resp.Body, &hcs); err != nil { return nil, err } return hcs, nil @@ -114,7 +114,7 @@ func (c *Client) CreateHealthCheck(i *CreateHealthCheckInput) (*HealthCheck, err defer resp.Body.Close() var h *HealthCheck - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -151,7 +151,7 @@ func (c *Client) GetHealthCheck(i *GetHealthCheckInput) (*HealthCheck, error) { defer resp.Body.Close() var h *HealthCheck - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -217,7 +217,7 @@ func (c *Client) UpdateHealthCheck(i *UpdateHealthCheckInput) (*HealthCheck, err defer resp.Body.Close() var h *HealthCheck - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -254,7 +254,7 @@ func (c *Client) DeleteHealthCheck(i *DeleteHealthCheckInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/ip.go b/vendor/github.com/fastly/go-fastly/v9/fastly/ip.go index ee3910349..ea2795425 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/ip.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/ip.go @@ -12,7 +12,7 @@ func (c *Client) AllIPs() (v4, v6 IPAddrs, err error) { defer resp.Body.Close() var m map[string][]string - if err := decodeBodyMap(resp.Body, &m); err != nil { + if err := DecodeBodyMap(resp.Body, &m); err != nil { return nil, nil, err } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/kv_store.go b/vendor/github.com/fastly/go-fastly/v9/fastly/kv_store.go index b7be19c10..465ed0a22 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/kv_store.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/kv_store.go @@ -49,7 +49,7 @@ func (c *Client) CreateKVStore(i *CreateKVStoreInput) (*KVStore, error) { defer 
resp.Body.Close() var store *KVStore - if err := decodeBodyMap(resp.Body, &store); err != nil { + if err := DecodeBodyMap(resp.Body, &store); err != nil { return nil, err } return store, nil @@ -107,7 +107,7 @@ func (c *Client) ListKVStores(i *ListKVStoresInput) (*ListKVStoresResponse, erro defer resp.Body.Close() var output *ListKVStoresResponse - if err := decodeBodyMap(resp.Body, &output); err != nil { + if err := DecodeBodyMap(resp.Body, &output); err != nil { return nil, err } return output, nil @@ -187,7 +187,7 @@ func (c *Client) GetKVStore(i *GetKVStoreInput) (*KVStore, error) { defer resp.Body.Close() var output *KVStore - if err := decodeBodyMap(resp.Body, &output); err != nil { + if err := DecodeBodyMap(resp.Body, &output); err != nil { return nil, err } return output, nil @@ -296,7 +296,7 @@ func (c *Client) ListKVStoreKeys(i *ListKVStoreKeysInput) (*ListKVStoreKeysRespo defer resp.Body.Close() var output *ListKVStoreKeysResponse - if err := decodeBodyMap(resp.Body, &output); err != nil { + if err := DecodeBodyMap(resp.Body, &output); err != nil { return nil, err } return output, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/load_balance_pool.go b/vendor/github.com/fastly/go-fastly/v9/fastly/load_balance_pool.go index 9621ff9a2..c87c3d4f9 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/load_balance_pool.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/load_balance_pool.go @@ -76,7 +76,7 @@ func (c *Client) ListPools(i *ListPoolsInput) ([]*Pool, error) { defer resp.Body.Close() var ps []*Pool - if err := decodeBodyMap(resp.Body, &ps); err != nil { + if err := DecodeBodyMap(resp.Body, &ps); err != nil { return nil, err } return ps, nil @@ -149,7 +149,7 @@ func (c *Client) CreatePool(i *CreatePoolInput) (*Pool, error) { defer resp.Body.Close() var p *Pool - if err := decodeBodyMap(resp.Body, &p); err != nil { + if err := DecodeBodyMap(resp.Body, &p); err != nil { return nil, err } return p, nil @@ -186,7 +186,7 @@ func (c *Client) GetPool(i *GetPoolInput) (*Pool, error) { defer resp.Body.Close() var p *Pool - if err := decodeBodyMap(resp.Body, &p); err != nil { + if err := DecodeBodyMap(resp.Body, &p); err != nil { return nil, err } return p, nil @@ -265,7 +265,7 @@ func (c *Client) UpdatePool(i *UpdatePoolInput) (*Pool, error) { defer resp.Body.Close() var p *Pool - if err := decodeBodyMap(resp.Body, &p); err != nil { + if err := DecodeBodyMap(resp.Body, &p); err != nil { return nil, err } return p, nil @@ -302,7 +302,7 @@ func (c *Client) DeletePool(i *DeletePoolInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_director.go b/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_director.go index fbe275cd8..6a13a005d 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_director.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_director.go @@ -64,7 +64,7 @@ func (c *Client) ListDirectors(i *ListDirectorsInput) ([]*Director, error) { defer resp.Body.Close() var ds []*Director - if err := decodeBodyMap(resp.Body, &ds); err != nil { + if err := DecodeBodyMap(resp.Body, &ds); err != nil { return nil, err } return ds, nil @@ -107,7 +107,7 @@ func (c *Client) CreateDirector(i *CreateDirectorInput) (*Director, error) { defer resp.Body.Close() var d *Director - if err := decodeBodyMap(resp.Body, &d); err 
!= nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -143,7 +143,7 @@ func (c *Client) GetDirector(i *GetDirectorInput) (*Director, error) { defer resp.Body.Close() var d *Director - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -191,7 +191,7 @@ func (c *Client) UpdateDirector(i *UpdateDirectorInput) (*Director, error) { defer resp.Body.Close() var d *Director - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -227,7 +227,7 @@ func (c *Client) DeleteDirector(i *DeleteDirectorInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_director_backend.go b/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_director_backend.go index d38fcacd3..c949544fc 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_director_backend.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_director_backend.go @@ -54,7 +54,7 @@ func (c *Client) CreateDirectorBackend(i *CreateDirectorBackendInput) (*Director defer resp.Body.Close() var b *DirectorBackend - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -96,7 +96,7 @@ func (c *Client) GetDirectorBackend(i *GetDirectorBackendInput) (*DirectorBacken defer resp.Body.Close() var b *DirectorBackend - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -138,7 +138,7 @@ func (c *Client) DeleteDirectorBackend(i *DeleteDirectorBackendInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_server.go b/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_server.go index cee9186b5..04dae1666 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_server.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/load_balancer_server.go @@ -47,7 +47,7 @@ func (c *Client) ListServers(i *ListServersInput) ([]*Server, error) { defer resp.Body.Close() var ss []*Server - if err := decodeBodyMap(resp.Body, &ss); err != nil { + if err := DecodeBodyMap(resp.Body, &ss); err != nil { return nil, err } return ss, nil @@ -94,7 +94,7 @@ func (c *Client) CreateServer(i *CreateServerInput) (*Server, error) { defer resp.Body.Close() var s *Server - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -131,7 +131,7 @@ func (c *Client) GetServer(i *GetServerInput) (*Server, error) { defer resp.Body.Close() var s *Server - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -182,7 +182,7 @@ func (c *Client) UpdateServer(i *UpdateServerInput) (*Server, error) { defer resp.Body.Close() var s *Server - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, 
nil @@ -219,7 +219,7 @@ func (c *Client) DeleteServer(i *DeleteServerInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_bigquery.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_bigquery.go index 7d454f0c3..308d4a240 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_bigquery.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_bigquery.go @@ -52,7 +52,7 @@ func (c *Client) ListBigQueries(i *ListBigQueriesInput) ([]*BigQuery, error) { defer resp.Body.Close() var bigQueries []*BigQuery - if err := decodeBodyMap(resp.Body, &bigQueries); err != nil { + if err := DecodeBodyMap(resp.Body, &bigQueries); err != nil { return nil, err } return bigQueries, nil @@ -107,7 +107,7 @@ func (c *Client) CreateBigQuery(i *CreateBigQueryInput) (*BigQuery, error) { defer resp.Body.Close() var bigQuery *BigQuery - if err := decodeBodyMap(resp.Body, &bigQuery); err != nil { + if err := DecodeBodyMap(resp.Body, &bigQuery); err != nil { return nil, err } return bigQuery, nil @@ -143,7 +143,7 @@ func (c *Client) GetBigQuery(i *GetBigQueryInput) (*BigQuery, error) { defer resp.Body.Close() var bigQuery *BigQuery - if err := decodeBodyMap(resp.Body, &bigQuery); err != nil { + if err := DecodeBodyMap(resp.Body, &bigQuery); err != nil { return nil, err } return bigQuery, nil @@ -203,7 +203,7 @@ func (c *Client) UpdateBigQuery(i *UpdateBigQueryInput) (*BigQuery, error) { defer resp.Body.Close() var bigQuery *BigQuery - if err := decodeBodyMap(resp.Body, &bigQuery); err != nil { + if err := DecodeBodyMap(resp.Body, &bigQuery); err != nil { return nil, err } return bigQuery, nil @@ -239,7 +239,7 @@ func (c *Client) DeleteBigQuery(i *DeleteBigQueryInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_blobstorage.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_blobstorage.go index a8fc9e83a..4c43dc7ff 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_blobstorage.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_blobstorage.go @@ -55,7 +55,7 @@ func (c *Client) ListBlobStorages(i *ListBlobStoragesInput) ([]*BlobStorage, err defer resp.Body.Close() var as []*BlobStorage - if err := decodeBodyMap(resp.Body, &as); err != nil { + if err := DecodeBodyMap(resp.Body, &as); err != nil { return nil, err } return as, nil @@ -118,7 +118,7 @@ func (c *Client) CreateBlobStorage(i *CreateBlobStorageInput) (*BlobStorage, err defer resp.Body.Close() var a *BlobStorage - if err := decodeBodyMap(resp.Body, &a); err != nil { + if err := DecodeBodyMap(resp.Body, &a); err != nil { return nil, err } return a, nil @@ -154,7 +154,7 @@ func (c *Client) GetBlobStorage(i *GetBlobStorageInput) (*BlobStorage, error) { defer resp.Body.Close() var a *BlobStorage - if err := decodeBodyMap(resp.Body, &a); err != nil { + if err := DecodeBodyMap(resp.Body, &a); err != nil { return nil, err } return a, nil @@ -222,7 +222,7 @@ func (c *Client) UpdateBlobStorage(i *UpdateBlobStorageInput) (*BlobStorage, err defer resp.Body.Close() var a *BlobStorage - if err := decodeBodyMap(resp.Body, &a); err != nil { + if err := DecodeBodyMap(resp.Body, &a); err != nil { return nil, err } return a, 
nil @@ -258,7 +258,7 @@ func (c *Client) DeleteBlobStorage(i *DeleteBlobStorageInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_cloudfiles.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_cloudfiles.go index 6cc6fab08..7aee1f366 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_cloudfiles.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_cloudfiles.go @@ -55,7 +55,7 @@ func (c *Client) ListCloudfiles(i *ListCloudfilesInput) ([]*Cloudfiles, error) { defer resp.Body.Close() var cloudfiles []*Cloudfiles - if err := decodeBodyMap(resp.Body, &cloudfiles); err != nil { + if err := DecodeBodyMap(resp.Body, &cloudfiles); err != nil { return nil, err } return cloudfiles, nil @@ -118,7 +118,7 @@ func (c *Client) CreateCloudfiles(i *CreateCloudfilesInput) (*Cloudfiles, error) defer resp.Body.Close() var cloudfiles *Cloudfiles - if err := decodeBodyMap(resp.Body, &cloudfiles); err != nil { + if err := DecodeBodyMap(resp.Body, &cloudfiles); err != nil { return nil, err } return cloudfiles, nil @@ -154,7 +154,7 @@ func (c *Client) GetCloudfiles(i *GetCloudfilesInput) (*Cloudfiles, error) { defer resp.Body.Close() var cloudfiles *Cloudfiles - if err := decodeBodyMap(resp.Body, &cloudfiles); err != nil { + if err := DecodeBodyMap(resp.Body, &cloudfiles); err != nil { return nil, err } return cloudfiles, nil @@ -222,7 +222,7 @@ func (c *Client) UpdateCloudfiles(i *UpdateCloudfilesInput) (*Cloudfiles, error) defer resp.Body.Close() var cloudfiles *Cloudfiles - if err := decodeBodyMap(resp.Body, &cloudfiles); err != nil { + if err := DecodeBodyMap(resp.Body, &cloudfiles); err != nil { return nil, err } return cloudfiles, nil @@ -258,7 +258,7 @@ func (c *Client) DeleteCloudfiles(i *DeleteCloudfilesInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_datadog.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_datadog.go index e9fc96462..bda1d494e 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_datadog.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_datadog.go @@ -46,7 +46,7 @@ func (c *Client) ListDatadog(i *ListDatadogInput) ([]*Datadog, error) { defer resp.Body.Close() var d []*Datadog - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -91,7 +91,7 @@ func (c *Client) CreateDatadog(i *CreateDatadogInput) (*Datadog, error) { defer resp.Body.Close() var d *Datadog - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -127,7 +127,7 @@ func (c *Client) GetDatadog(i *GetDatadogInput) (*Datadog, error) { defer resp.Body.Close() var d *Datadog - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -177,7 +177,7 @@ func (c *Client) UpdateDatadog(i *UpdateDatadogInput) (*Datadog, error) { defer resp.Body.Close() var d *Datadog - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil 
@@ -213,7 +213,7 @@ func (c *Client) DeleteDatadog(i *DeleteDatadogInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_digitalocean.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_digitalocean.go index db2e57a36..ef92561ba 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_digitalocean.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_digitalocean.go @@ -55,7 +55,7 @@ func (c *Client) ListDigitalOceans(i *ListDigitalOceansInput) ([]*DigitalOcean, defer resp.Body.Close() var digitaloceans []*DigitalOcean - if err := decodeBodyMap(resp.Body, &digitaloceans); err != nil { + if err := DecodeBodyMap(resp.Body, &digitaloceans); err != nil { return nil, err } return digitaloceans, nil @@ -118,7 +118,7 @@ func (c *Client) CreateDigitalOcean(i *CreateDigitalOceanInput) (*DigitalOcean, defer resp.Body.Close() var digitalocean *DigitalOcean - if err := decodeBodyMap(resp.Body, &digitalocean); err != nil { + if err := DecodeBodyMap(resp.Body, &digitalocean); err != nil { return nil, err } return digitalocean, nil @@ -154,7 +154,7 @@ func (c *Client) GetDigitalOcean(i *GetDigitalOceanInput) (*DigitalOcean, error) defer resp.Body.Close() var digitalocean *DigitalOcean - if err := decodeBodyMap(resp.Body, &digitalocean); err != nil { + if err := DecodeBodyMap(resp.Body, &digitalocean); err != nil { return nil, err } return digitalocean, nil @@ -222,7 +222,7 @@ func (c *Client) UpdateDigitalOcean(i *UpdateDigitalOceanInput) (*DigitalOcean, defer resp.Body.Close() var digitalocean *DigitalOcean - if err := decodeBodyMap(resp.Body, &digitalocean); err != nil { + if err := DecodeBodyMap(resp.Body, &digitalocean); err != nil { return nil, err } return digitalocean, nil @@ -258,7 +258,7 @@ func (c *Client) DeleteDigitalOcean(i *DeleteDigitalOceanInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_elasticsearch.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_elasticsearch.go index 20b3e0338..63af915a9 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_elasticsearch.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_elasticsearch.go @@ -55,7 +55,7 @@ func (c *Client) ListElasticsearch(i *ListElasticsearchInput) ([]*Elasticsearch, defer resp.Body.Close() var elasticsearch []*Elasticsearch - if err := decodeBodyMap(resp.Body, &elasticsearch); err != nil { + if err := DecodeBodyMap(resp.Body, &elasticsearch); err != nil { return nil, err } return elasticsearch, nil @@ -118,7 +118,7 @@ func (c *Client) CreateElasticsearch(i *CreateElasticsearchInput) (*Elasticsearc defer resp.Body.Close() var elasticsearch *Elasticsearch - if err := decodeBodyMap(resp.Body, &elasticsearch); err != nil { + if err := DecodeBodyMap(resp.Body, &elasticsearch); err != nil { return nil, err } return elasticsearch, nil @@ -154,7 +154,7 @@ func (c *Client) GetElasticsearch(i *GetElasticsearchInput) (*Elasticsearch, err defer resp.Body.Close() var es *Elasticsearch - if err := decodeBodyMap(resp.Body, &es); err != nil { + if err := DecodeBodyMap(resp.Body, &es); err != nil { return nil, err } @@ -224,7 +224,7 @@ func (c *Client) UpdateElasticsearch(i 
*UpdateElasticsearchInput) (*Elasticsearc defer resp.Body.Close() var es *Elasticsearch - if err := decodeBodyMap(resp.Body, &es); err != nil { + if err := DecodeBodyMap(resp.Body, &es); err != nil { return nil, err } return es, nil @@ -261,7 +261,7 @@ func (c *Client) DeleteElasticsearch(i *DeleteElasticsearchInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_ftp.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_ftp.go index 04ca012e0..73ae732f6 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_ftp.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_ftp.go @@ -55,7 +55,7 @@ func (c *Client) ListFTPs(i *ListFTPsInput) ([]*FTP, error) { defer resp.Body.Close() var ftps []*FTP - if err := decodeBodyMap(resp.Body, &ftps); err != nil { + if err := DecodeBodyMap(resp.Body, &ftps); err != nil { return nil, err } return ftps, nil @@ -118,7 +118,7 @@ func (c *Client) CreateFTP(i *CreateFTPInput) (*FTP, error) { defer resp.Body.Close() var ftp *FTP - if err := decodeBodyMap(resp.Body, &ftp); err != nil { + if err := DecodeBodyMap(resp.Body, &ftp); err != nil { return nil, err } return ftp, nil @@ -154,7 +154,7 @@ func (c *Client) GetFTP(i *GetFTPInput) (*FTP, error) { defer resp.Body.Close() var b *FTP - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -222,7 +222,7 @@ func (c *Client) UpdateFTP(i *UpdateFTPInput) (*FTP, error) { defer resp.Body.Close() var b *FTP - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -258,7 +258,7 @@ func (c *Client) DeleteFTP(i *DeleteFTPInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_gcs.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_gcs.go index d93592b5e..fc03522dc 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_gcs.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_gcs.go @@ -55,7 +55,7 @@ func (c *Client) ListGCSs(i *ListGCSsInput) ([]*GCS, error) { defer resp.Body.Close() var gcses []*GCS - if err := decodeBodyMap(resp.Body, &gcses); err != nil { + if err := DecodeBodyMap(resp.Body, &gcses); err != nil { return nil, err } return gcses, nil @@ -118,7 +118,7 @@ func (c *Client) CreateGCS(i *CreateGCSInput) (*GCS, error) { defer resp.Body.Close() var gcs *GCS - if err := decodeBodyMap(resp.Body, &gcs); err != nil { + if err := DecodeBodyMap(resp.Body, &gcs); err != nil { return nil, err } return gcs, nil @@ -154,7 +154,7 @@ func (c *Client) GetGCS(i *GetGCSInput) (*GCS, error) { defer resp.Body.Close() var b *GCS - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -222,7 +222,7 @@ func (c *Client) UpdateGCS(i *UpdateGCSInput) (*GCS, error) { defer resp.Body.Close() var b *GCS - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -258,7 +258,7 @@ func (c *Client) DeleteGCS(i *DeleteGCSInput) error { defer 
resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_grafanacloudlogs.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_grafanacloudlogs.go index f5c32ef23..5a0d793d3 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_grafanacloudlogs.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_grafanacloudlogs.go @@ -50,7 +50,7 @@ func (c *Client) ListGrafanaCloudLogs(i *ListGrafanaCloudLogsInput) ([]*GrafanaC defer resp.Body.Close() var d []*GrafanaCloudLogs - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -101,7 +101,7 @@ func (c *Client) CreateGrafanaCloudLogs(i *CreateGrafanaCloudLogsInput) (*Grafan defer resp.Body.Close() var d *GrafanaCloudLogs - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -137,7 +137,7 @@ func (c *Client) GetGrafanaCloudLogs(i *GetGrafanaCloudLogsInput) (*GrafanaCloud defer resp.Body.Close() var d *GrafanaCloudLogs - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -193,7 +193,7 @@ func (c *Client) UpdateGrafanaCloudLogs(i *UpdateGrafanaCloudLogsInput) (*Grafan defer resp.Body.Close() var d *GrafanaCloudLogs - if err := decodeBodyMap(resp.Body, &d); err != nil { + if err := DecodeBodyMap(resp.Body, &d); err != nil { return nil, err } return d, nil @@ -229,7 +229,7 @@ func (c *Client) DeleteGrafanaCloudLogs(i *DeleteGrafanaCloudLogsInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_heroku.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_heroku.go index 12c06bbcc..e1698dd01 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_heroku.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_heroku.go @@ -46,7 +46,7 @@ func (c *Client) ListHerokus(i *ListHerokusInput) ([]*Heroku, error) { defer resp.Body.Close() var hs []*Heroku - if err := decodeBodyMap(resp.Body, &hs); err != nil { + if err := DecodeBodyMap(resp.Body, &hs); err != nil { return nil, err } return hs, nil @@ -91,7 +91,7 @@ func (c *Client) CreateHeroku(i *CreateHerokuInput) (*Heroku, error) { defer resp.Body.Close() var h *Heroku - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -127,7 +127,7 @@ func (c *Client) GetHeroku(i *GetHerokuInput) (*Heroku, error) { defer resp.Body.Close() var h *Heroku - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -177,7 +177,7 @@ func (c *Client) UpdateHeroku(i *UpdateHerokuInput) (*Heroku, error) { defer resp.Body.Close() var h *Heroku - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -213,7 +213,7 @@ func (c *Client) DeleteHeroku(i *DeleteHerokuInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := 
DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_honeycomb.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_honeycomb.go index 0bd6555f9..2f33dffd2 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_honeycomb.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_honeycomb.go @@ -46,7 +46,7 @@ func (c *Client) ListHoneycombs(i *ListHoneycombsInput) ([]*Honeycomb, error) { defer resp.Body.Close() var hs []*Honeycomb - if err := decodeBodyMap(resp.Body, &hs); err != nil { + if err := DecodeBodyMap(resp.Body, &hs); err != nil { return nil, err } return hs, nil @@ -91,7 +91,7 @@ func (c *Client) CreateHoneycomb(i *CreateHoneycombInput) (*Honeycomb, error) { defer resp.Body.Close() var h *Honeycomb - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -127,7 +127,7 @@ func (c *Client) GetHoneycomb(i *GetHoneycombInput) (*Honeycomb, error) { defer resp.Body.Close() var h *Honeycomb - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -177,7 +177,7 @@ func (c *Client) UpdateHoneycomb(i *UpdateHoneycombInput) (*Honeycomb, error) { defer resp.Body.Close() var h *Honeycomb - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -213,7 +213,7 @@ func (c *Client) DeleteHoneycomb(i *DeleteHoneycombInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_https.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_https.go index 5f1befc5b..8571c5c8d 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_https.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_https.go @@ -57,7 +57,7 @@ func (c *Client) ListHTTPS(i *ListHTTPSInput) ([]*HTTPS, error) { defer resp.Body.Close() var https []*HTTPS - if err := decodeBodyMap(resp.Body, &https); err != nil { + if err := DecodeBodyMap(resp.Body, &https); err != nil { return nil, err } return https, nil @@ -124,7 +124,7 @@ func (c *Client) CreateHTTPS(i *CreateHTTPSInput) (*HTTPS, error) { defer resp.Body.Close() var https *HTTPS - if err := decodeBodyMap(resp.Body, &https); err != nil { + if err := DecodeBodyMap(resp.Body, &https); err != nil { return nil, err } return https, nil @@ -160,7 +160,7 @@ func (c *Client) GetHTTPS(i *GetHTTPSInput) (*HTTPS, error) { defer resp.Body.Close() var h *HTTPS - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } @@ -233,7 +233,7 @@ func (c *Client) UpdateHTTPS(i *UpdateHTTPSInput) (*HTTPS, error) { defer resp.Body.Close() var h *HTTPS - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } return h, nil @@ -269,7 +269,7 @@ func (c *Client) DeleteHTTPS(i *DeleteHTTPSInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_kafka.go 
b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_kafka.go index 9bab65bff..dec63cb58 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_kafka.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_kafka.go @@ -58,7 +58,7 @@ func (c *Client) ListKafkas(i *ListKafkasInput) ([]*Kafka, error) { defer resp.Body.Close() var k []*Kafka - if err := decodeBodyMap(resp.Body, &k); err != nil { + if err := DecodeBodyMap(resp.Body, &k); err != nil { return nil, err } return k, nil @@ -127,7 +127,7 @@ func (c *Client) CreateKafka(i *CreateKafkaInput) (*Kafka, error) { defer resp.Body.Close() var k *Kafka - if err := decodeBodyMap(resp.Body, &k); err != nil { + if err := DecodeBodyMap(resp.Body, &k); err != nil { return nil, err } return k, nil @@ -163,7 +163,7 @@ func (c *Client) GetKafka(i *GetKafkaInput) (*Kafka, error) { defer resp.Body.Close() var k *Kafka - if err := decodeBodyMap(resp.Body, &k); err != nil { + if err := DecodeBodyMap(resp.Body, &k); err != nil { return nil, err } return k, nil @@ -237,7 +237,7 @@ func (c *Client) UpdateKafka(i *UpdateKafkaInput) (*Kafka, error) { defer resp.Body.Close() var k *Kafka - if err := decodeBodyMap(resp.Body, &k); err != nil { + if err := DecodeBodyMap(resp.Body, &k); err != nil { return nil, err } return k, nil @@ -273,7 +273,7 @@ func (c *Client) DeleteKafka(i *DeleteKafkaInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_kinesis.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_kinesis.go index 12adc274e..471877c05 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_kinesis.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_kinesis.go @@ -49,7 +49,7 @@ func (c *Client) ListKinesis(i *ListKinesisInput) ([]*Kinesis, error) { defer resp.Body.Close() var kineses []*Kinesis - if err := decodeBodyMap(resp.Body, &kineses); err != nil { + if err := DecodeBodyMap(resp.Body, &kineses); err != nil { return nil, err } return kineses, nil @@ -100,7 +100,7 @@ func (c *Client) CreateKinesis(i *CreateKinesisInput) (*Kinesis, error) { defer resp.Body.Close() var kinesis *Kinesis - if err := decodeBodyMap(resp.Body, &kinesis); err != nil { + if err := DecodeBodyMap(resp.Body, &kinesis); err != nil { return nil, err } return kinesis, nil @@ -136,7 +136,7 @@ func (c *Client) GetKinesis(i *GetKinesisInput) (*Kinesis, error) { defer resp.Body.Close() var kinesis *Kinesis - if err := decodeBodyMap(resp.Body, &kinesis); err != nil { + if err := DecodeBodyMap(resp.Body, &kinesis); err != nil { return nil, err } return kinesis, nil @@ -192,7 +192,7 @@ func (c *Client) UpdateKinesis(i *UpdateKinesisInput) (*Kinesis, error) { defer resp.Body.Close() var kinesis *Kinesis - if err := decodeBodyMap(resp.Body, &kinesis); err != nil { + if err := DecodeBodyMap(resp.Body, &kinesis); err != nil { return nil, err } return kinesis, nil @@ -228,7 +228,7 @@ func (c *Client) DeleteKinesis(i *DeleteKinesisInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_logentries.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_logentries.go index f349b41d1..34221ddc0 100644 --- 
a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_logentries.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_logentries.go @@ -48,7 +48,7 @@ func (c *Client) ListLogentries(i *ListLogentriesInput) ([]*Logentries, error) { defer resp.Body.Close() var ls []*Logentries - if err := decodeBodyMap(resp.Body, &ls); err != nil { + if err := DecodeBodyMap(resp.Body, &ls); err != nil { return nil, err } return ls, nil @@ -97,7 +97,7 @@ func (c *Client) CreateLogentries(i *CreateLogentriesInput) (*Logentries, error) defer resp.Body.Close() var l *Logentries - if err := decodeBodyMap(resp.Body, &l); err != nil { + if err := DecodeBodyMap(resp.Body, &l); err != nil { return nil, err } return l, nil @@ -133,7 +133,7 @@ func (c *Client) GetLogentries(i *GetLogentriesInput) (*Logentries, error) { defer resp.Body.Close() var l *Logentries - if err := decodeBodyMap(resp.Body, &l); err != nil { + if err := DecodeBodyMap(resp.Body, &l); err != nil { return nil, err } return l, nil @@ -187,7 +187,7 @@ func (c *Client) UpdateLogentries(i *UpdateLogentriesInput) (*Logentries, error) defer resp.Body.Close() var l *Logentries - if err := decodeBodyMap(resp.Body, &l); err != nil { + if err := DecodeBodyMap(resp.Body, &l); err != nil { return nil, err } return l, nil @@ -223,7 +223,7 @@ func (c *Client) DeleteLogentries(i *DeleteLogentriesInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_loggly.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_loggly.go index 2c033d67a..fc59babe2 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_loggly.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_loggly.go @@ -45,7 +45,7 @@ func (c *Client) ListLoggly(i *ListLogglyInput) ([]*Loggly, error) { defer resp.Body.Close() var ls []*Loggly - if err := decodeBodyMap(resp.Body, &ls); err != nil { + if err := DecodeBodyMap(resp.Body, &ls); err != nil { return nil, err } return ls, nil @@ -88,7 +88,7 @@ func (c *Client) CreateLoggly(i *CreateLogglyInput) (*Loggly, error) { defer resp.Body.Close() var l *Loggly - if err := decodeBodyMap(resp.Body, &l); err != nil { + if err := DecodeBodyMap(resp.Body, &l); err != nil { return nil, err } return l, nil @@ -124,7 +124,7 @@ func (c *Client) GetLoggly(i *GetLogglyInput) (*Loggly, error) { defer resp.Body.Close() var l *Loggly - if err := decodeBodyMap(resp.Body, &l); err != nil { + if err := DecodeBodyMap(resp.Body, &l); err != nil { return nil, err } return l, nil @@ -172,7 +172,7 @@ func (c *Client) UpdateLoggly(i *UpdateLogglyInput) (*Loggly, error) { defer resp.Body.Close() var l *Loggly - if err := decodeBodyMap(resp.Body, &l); err != nil { + if err := DecodeBodyMap(resp.Body, &l); err != nil { return nil, err } return l, nil @@ -208,7 +208,7 @@ func (c *Client) DeleteLoggly(i *DeleteLogglyInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_logshuttle.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_logshuttle.go index 0fa83a88a..669d38b6f 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_logshuttle.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_logshuttle.go @@ -46,7 +46,7 @@ func (c 
*Client) ListLogshuttles(i *ListLogshuttlesInput) ([]*Logshuttle, error) defer resp.Body.Close() var ls []*Logshuttle - if err := decodeBodyMap(resp.Body, &ls); err != nil { + if err := DecodeBodyMap(resp.Body, &ls); err != nil { return nil, err } return ls, nil @@ -91,7 +91,7 @@ func (c *Client) CreateLogshuttle(i *CreateLogshuttleInput) (*Logshuttle, error) defer resp.Body.Close() var l *Logshuttle - if err := decodeBodyMap(resp.Body, &l); err != nil { + if err := DecodeBodyMap(resp.Body, &l); err != nil { return nil, err } return l, nil @@ -127,7 +127,7 @@ func (c *Client) GetLogshuttle(i *GetLogshuttleInput) (*Logshuttle, error) { defer resp.Body.Close() var l *Logshuttle - if err := decodeBodyMap(resp.Body, &l); err != nil { + if err := DecodeBodyMap(resp.Body, &l); err != nil { return nil, err } return l, nil @@ -177,7 +177,7 @@ func (c *Client) UpdateLogshuttle(i *UpdateLogshuttleInput) (*Logshuttle, error) defer resp.Body.Close() var l *Logshuttle - if err := decodeBodyMap(resp.Body, &l); err != nil { + if err := DecodeBodyMap(resp.Body, &l); err != nil { return nil, err } return l, nil @@ -213,7 +213,7 @@ func (c *Client) DeleteLogshuttle(i *DeleteLogshuttleInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_newrelic.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_newrelic.go index fb923449e..bc4d4ace7 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_newrelic.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_newrelic.go @@ -46,7 +46,7 @@ func (c *Client) ListNewRelic(i *ListNewRelicInput) ([]*NewRelic, error) { defer resp.Body.Close() var n []*NewRelic - if err := decodeBodyMap(resp.Body, &n); err != nil { + if err := DecodeBodyMap(resp.Body, &n); err != nil { return nil, err } return n, nil @@ -91,7 +91,7 @@ func (c *Client) CreateNewRelic(i *CreateNewRelicInput) (*NewRelic, error) { defer resp.Body.Close() var n *NewRelic - if err := decodeBodyMap(resp.Body, &n); err != nil { + if err := DecodeBodyMap(resp.Body, &n); err != nil { return nil, err } return n, nil @@ -127,7 +127,7 @@ func (c *Client) GetNewRelic(i *GetNewRelicInput) (*NewRelic, error) { defer resp.Body.Close() var n *NewRelic - if err := decodeBodyMap(resp.Body, &n); err != nil { + if err := DecodeBodyMap(resp.Body, &n); err != nil { return nil, err } return n, nil @@ -177,7 +177,7 @@ func (c *Client) UpdateNewRelic(i *UpdateNewRelicInput) (*NewRelic, error) { defer resp.Body.Close() var n *NewRelic - if err := decodeBodyMap(resp.Body, &n); err != nil { + if err := DecodeBodyMap(resp.Body, &n); err != nil { return nil, err } return n, nil @@ -213,7 +213,7 @@ func (c *Client) DeleteNewRelic(i *DeleteNewRelicInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_newrelicotlp.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_newrelicotlp.go index a85adc94d..efa78f542 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_newrelicotlp.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_newrelicotlp.go @@ -49,7 +49,7 @@ func (c *Client) ListNewRelicOTLP(i *ListNewRelicOTLPInput) ([]*NewRelicOTLP, er defer resp.Body.Close() var n []*NewRelicOTLP - if 
err := decodeBodyMap(resp.Body, &n); err != nil { + if err := DecodeBodyMap(resp.Body, &n); err != nil { return nil, err } return n, nil @@ -97,7 +97,7 @@ func (c *Client) CreateNewRelicOTLP(i *CreateNewRelicOTLPInput) (*NewRelicOTLP, defer resp.Body.Close() var n *NewRelicOTLP - if err := decodeBodyMap(resp.Body, &n); err != nil { + if err := DecodeBodyMap(resp.Body, &n); err != nil { return nil, err } return n, nil @@ -133,7 +133,7 @@ func (c *Client) GetNewRelicOTLP(i *GetNewRelicOTLPInput) (*NewRelicOTLP, error) defer resp.Body.Close() var n *NewRelicOTLP - if err := decodeBodyMap(resp.Body, &n); err != nil { + if err := DecodeBodyMap(resp.Body, &n); err != nil { return nil, err } return n, nil @@ -186,7 +186,7 @@ func (c *Client) UpdateNewRelicOTLP(i *UpdateNewRelicOTLPInput) (*NewRelicOTLP, defer resp.Body.Close() var n *NewRelicOTLP - if err := decodeBodyMap(resp.Body, &n); err != nil { + if err := DecodeBodyMap(resp.Body, &n); err != nil { return nil, err } return n, nil @@ -222,7 +222,7 @@ func (c *Client) DeleteNewRelicOTLP(i *DeleteNewRelicOTLPInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_openstack.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_openstack.go index 7ce544c6c..c61db9bde 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_openstack.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_openstack.go @@ -55,7 +55,7 @@ func (c *Client) ListOpenstack(i *ListOpenstackInput) ([]*Openstack, error) { defer resp.Body.Close() var openstacks []*Openstack - if err := decodeBodyMap(resp.Body, &openstacks); err != nil { + if err := DecodeBodyMap(resp.Body, &openstacks); err != nil { return nil, err } return openstacks, nil @@ -118,7 +118,7 @@ func (c *Client) CreateOpenstack(i *CreateOpenstackInput) (*Openstack, error) { defer resp.Body.Close() var openstack *Openstack - if err := decodeBodyMap(resp.Body, &openstack); err != nil { + if err := DecodeBodyMap(resp.Body, &openstack); err != nil { return nil, err } return openstack, nil @@ -154,7 +154,7 @@ func (c *Client) GetOpenstack(i *GetOpenstackInput) (*Openstack, error) { defer resp.Body.Close() var openstack *Openstack - if err := decodeBodyMap(resp.Body, &openstack); err != nil { + if err := DecodeBodyMap(resp.Body, &openstack); err != nil { return nil, err } return openstack, nil @@ -222,7 +222,7 @@ func (c *Client) UpdateOpenstack(i *UpdateOpenstackInput) (*Openstack, error) { defer resp.Body.Close() var openstack *Openstack - if err := decodeBodyMap(resp.Body, &openstack); err != nil { + if err := DecodeBodyMap(resp.Body, &openstack); err != nil { return nil, err } return openstack, nil @@ -258,7 +258,7 @@ func (c *Client) DeleteOpenstack(i *DeleteOpenstackInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_papertrail.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_papertrail.go index 739c0914f..593279514 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_papertrail.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_papertrail.go @@ -46,7 +46,7 @@ func (c *Client) ListPapertrails(i *ListPapertrailsInput) ([]*Papertrail, error) defer 
resp.Body.Close() var ps []*Papertrail - if err := decodeBodyMap(resp.Body, &ps); err != nil { + if err := DecodeBodyMap(resp.Body, &ps); err != nil { return nil, err } return ps, nil @@ -91,7 +91,7 @@ func (c *Client) CreatePapertrail(i *CreatePapertrailInput) (*Papertrail, error) defer resp.Body.Close() var p *Papertrail - if err := decodeBodyMap(resp.Body, &p); err != nil { + if err := DecodeBodyMap(resp.Body, &p); err != nil { return nil, err } return p, nil @@ -127,7 +127,7 @@ func (c *Client) GetPapertrail(i *GetPapertrailInput) (*Papertrail, error) { defer resp.Body.Close() var p *Papertrail - if err := decodeBodyMap(resp.Body, &p); err != nil { + if err := DecodeBodyMap(resp.Body, &p); err != nil { return nil, err } return p, nil @@ -177,7 +177,7 @@ func (c *Client) UpdatePapertrail(i *UpdatePapertrailInput) (*Papertrail, error) defer resp.Body.Close() var p *Papertrail - if err := decodeBodyMap(resp.Body, &p); err != nil { + if err := DecodeBodyMap(resp.Body, &p); err != nil { return nil, err } return p, nil @@ -213,7 +213,7 @@ func (c *Client) DeletePapertrail(i *DeletePapertrailInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_pubsub.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_pubsub.go index 5f21218d5..28d5d8a32 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_pubsub.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_pubsub.go @@ -49,7 +49,7 @@ func (c *Client) ListPubsubs(i *ListPubsubsInput) ([]*Pubsub, error) { defer resp.Body.Close() var pubsubs []*Pubsub - if err := decodeBodyMap(resp.Body, &pubsubs); err != nil { + if err := DecodeBodyMap(resp.Body, &pubsubs); err != nil { return nil, err } return pubsubs, nil @@ -100,7 +100,7 @@ func (c *Client) CreatePubsub(i *CreatePubsubInput) (*Pubsub, error) { defer resp.Body.Close() var pubsub *Pubsub - if err := decodeBodyMap(resp.Body, &pubsub); err != nil { + if err := DecodeBodyMap(resp.Body, &pubsub); err != nil { return nil, err } return pubsub, nil @@ -136,7 +136,7 @@ func (c *Client) GetPubsub(i *GetPubsubInput) (*Pubsub, error) { defer resp.Body.Close() var b *Pubsub - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -192,7 +192,7 @@ func (c *Client) UpdatePubsub(i *UpdatePubsubInput) (*Pubsub, error) { defer resp.Body.Close() var b *Pubsub - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -228,7 +228,7 @@ func (c *Client) DeletePubsub(i *DeletePubsubInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_s3.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_s3.go index ecb2c1988..06d377793 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_s3.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_s3.go @@ -109,7 +109,7 @@ func (c *Client) ListS3s(i *ListS3sInput) ([]*S3, error) { defer resp.Body.Close() var s3s []*S3 - if err := decodeBodyMap(resp.Body, &s3s); err != nil { + if err := DecodeBodyMap(resp.Body, &s3s); err != nil { return nil, err } return s3s, nil 
@@ -190,7 +190,7 @@ func (c *Client) CreateS3(i *CreateS3Input) (*S3, error) { defer resp.Body.Close() var s3 *S3 - if err := decodeBodyMap(resp.Body, &s3); err != nil { + if err := DecodeBodyMap(resp.Body, &s3); err != nil { return nil, err } return s3, nil @@ -226,7 +226,7 @@ func (c *Client) GetS3(i *GetS3Input) (*S3, error) { defer resp.Body.Close() var s3 *S3 - if err := decodeBodyMap(resp.Body, &s3); err != nil { + if err := DecodeBodyMap(resp.Body, &s3); err != nil { return nil, err } return s3, nil @@ -312,7 +312,7 @@ func (c *Client) UpdateS3(i *UpdateS3Input) (*S3, error) { defer resp.Body.Close() var s3 *S3 - if err := decodeBodyMap(resp.Body, &s3); err != nil { + if err := DecodeBodyMap(resp.Body, &s3); err != nil { return nil, err } return s3, nil @@ -348,7 +348,7 @@ func (c *Client) DeleteS3(i *DeleteS3Input) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_scalyr.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_scalyr.go index a83148963..5d19adc8f 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_scalyr.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_scalyr.go @@ -47,7 +47,7 @@ func (c *Client) ListScalyrs(i *ListScalyrsInput) ([]*Scalyr, error) { defer resp.Body.Close() var ss []*Scalyr - if err := decodeBodyMap(resp.Body, &ss); err != nil { + if err := DecodeBodyMap(resp.Body, &ss); err != nil { return nil, err } return ss, nil @@ -94,7 +94,7 @@ func (c *Client) CreateScalyr(i *CreateScalyrInput) (*Scalyr, error) { defer resp.Body.Close() var s *Scalyr - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -130,7 +130,7 @@ func (c *Client) GetScalyr(i *GetScalyrInput) (*Scalyr, error) { defer resp.Body.Close() var s *Scalyr - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -182,7 +182,7 @@ func (c *Client) UpdateScalyr(i *UpdateScalyrInput) (*Scalyr, error) { defer resp.Body.Close() var s *Scalyr - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -218,7 +218,7 @@ func (c *Client) DeleteScalyr(i *DeleteScalyrInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_sftp.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_sftp.go index 6bef274fc..dcf5b9578 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_sftp.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_sftp.go @@ -57,7 +57,7 @@ func (c *Client) ListSFTPs(i *ListSFTPsInput) ([]*SFTP, error) { defer resp.Body.Close() var sftps []*SFTP - if err := decodeBodyMap(resp.Body, &sftps); err != nil { + if err := DecodeBodyMap(resp.Body, &sftps); err != nil { return nil, err } return sftps, nil @@ -124,7 +124,7 @@ func (c *Client) CreateSFTP(i *CreateSFTPInput) (*SFTP, error) { defer resp.Body.Close() var ftp *SFTP - if err := decodeBodyMap(resp.Body, &ftp); err != nil { + if err := DecodeBodyMap(resp.Body, &ftp); err != nil { return nil, err } return ftp, nil @@ -160,7 +160,7 @@ func (c 
*Client) GetSFTP(i *GetSFTPInput) (*SFTP, error) { defer resp.Body.Close() var b *SFTP - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -232,7 +232,7 @@ func (c *Client) UpdateSFTP(i *UpdateSFTPInput) (*SFTP, error) { defer resp.Body.Close() var b *SFTP - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -268,7 +268,7 @@ func (c *Client) DeleteSFTP(i *DeleteSFTPInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_splunk.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_splunk.go index a3f9340ad..124866a48 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_splunk.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_splunk.go @@ -53,7 +53,7 @@ func (c *Client) ListSplunks(i *ListSplunksInput) ([]*Splunk, error) { defer resp.Body.Close() var ss []*Splunk - if err := decodeBodyMap(resp.Body, &ss); err != nil { + if err := DecodeBodyMap(resp.Body, &ss); err != nil { return nil, err } return ss, nil @@ -112,7 +112,7 @@ func (c *Client) CreateSplunk(i *CreateSplunkInput) (*Splunk, error) { defer resp.Body.Close() var s *Splunk - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -148,7 +148,7 @@ func (c *Client) GetSplunk(i *GetSplunkInput) (*Splunk, error) { defer resp.Body.Close() var s *Splunk - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -212,7 +212,7 @@ func (c *Client) UpdateSplunk(i *UpdateSplunkInput) (*Splunk, error) { defer resp.Body.Close() var s *Splunk - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -248,7 +248,7 @@ func (c *Client) DeleteSplunk(i *DeleteSplunkInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_sumologic.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_sumologic.go index cfab88217..b6ac2615f 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_sumologic.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_sumologic.go @@ -47,7 +47,7 @@ func (c *Client) ListSumologics(i *ListSumologicsInput) ([]*Sumologic, error) { defer resp.Body.Close() var ss []*Sumologic - if err := decodeBodyMap(resp.Body, &ss); err != nil { + if err := DecodeBodyMap(resp.Body, &ss); err != nil { return nil, err } return ss, nil @@ -92,7 +92,7 @@ func (c *Client) CreateSumologic(i *CreateSumologicInput) (*Sumologic, error) { defer resp.Body.Close() var s *Sumologic - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -128,7 +128,7 @@ func (c *Client) GetSumologic(i *GetSumologicInput) (*Sumologic, error) { defer resp.Body.Close() var s *Sumologic - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } 
return s, nil @@ -179,7 +179,7 @@ func (c *Client) UpdateSumologic(i *UpdateSumologicInput) (*Sumologic, error) { defer resp.Body.Close() var s *Sumologic - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -215,7 +215,7 @@ func (c *Client) DeleteSumologic(i *DeleteSumologicInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_syslog.go b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_syslog.go index efd2bfa31..27a06d96c 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/logging_syslog.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/logging_syslog.go @@ -55,7 +55,7 @@ func (c *Client) ListSyslogs(i *ListSyslogsInput) ([]*Syslog, error) { defer resp.Body.Close() var ss []*Syslog - if err := decodeBodyMap(resp.Body, &ss); err != nil { + if err := DecodeBodyMap(resp.Body, &ss); err != nil { return nil, err } return ss, nil @@ -118,7 +118,7 @@ func (c *Client) CreateSyslog(i *CreateSyslogInput) (*Syslog, error) { defer resp.Body.Close() var s *Syslog - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -154,7 +154,7 @@ func (c *Client) GetSyslog(i *GetSyslogInput) (*Syslog, error) { defer resp.Body.Close() var s *Syslog - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -222,7 +222,7 @@ func (c *Client) UpdateSyslog(i *UpdateSyslogInput) (*Syslog, error) { defer resp.Body.Close() var s *Syslog - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -258,7 +258,7 @@ func (c *Client) DeleteSyslog(i *DeleteSyslogInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/managed_logging.go b/vendor/github.com/fastly/go-fastly/v9/fastly/managed_logging.go index 688595d94..3e0efe2ae 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/managed_logging.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/managed_logging.go @@ -61,7 +61,7 @@ func (c *Client) CreateManagedLogging(i *CreateManagedLoggingInput) (*ManagedLog defer resp.Body.Close() var m *ManagedLogging - if err := decodeBodyMap(resp.Body, &m); err != nil { + if err := DecodeBodyMap(resp.Body, &m); err != nil { return nil, err } return m, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/paginator.go b/vendor/github.com/fastly/go-fastly/v9/fastly/paginator.go index c7886d9ef..aa9ee3aca 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/paginator.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/paginator.go @@ -133,7 +133,7 @@ func (p *ListPaginator[T]) GetNext() ([]*T, error) { p.consumed = true var s []*T - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/product_enablement.go b/vendor/github.com/fastly/go-fastly/v9/fastly/product_enablement.go index b162d7811..913b48635 100644 --- 
a/vendor/github.com/fastly/go-fastly/v9/fastly/product_enablement.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/product_enablement.go @@ -59,6 +59,9 @@ type ProductEnablementInput struct { } // GetProduct retrieves the details of the product enabled on the service. +// +// Deprecated: The 'Get' functions in the product-specific packages +// should be used instead of this function. func (c *Client) GetProduct(i *ProductEnablementInput) (*ProductEnablement, error) { if i.ProductID == ProductUndefined { return nil, ErrMissingProductID @@ -76,7 +79,7 @@ func (c *Client) GetProduct(i *ProductEnablementInput) (*ProductEnablement, erro defer resp.Body.Close() var h *ProductEnablement - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } @@ -84,6 +87,9 @@ func (c *Client) GetProduct(i *ProductEnablementInput) (*ProductEnablement, erro } // EnableProduct enables the specified product on the service. +// +// Deprecated: The 'Enable' functions in the product-specific packages +// should be used instead of this function. func (c *Client) EnableProduct(i *ProductEnablementInput) (*ProductEnablement, error) { if i.ProductID == ProductUndefined { return nil, ErrMissingProductID @@ -101,13 +107,16 @@ func (c *Client) EnableProduct(i *ProductEnablementInput) (*ProductEnablement, e defer resp.Body.Close() var http3 *ProductEnablement - if err := decodeBodyMap(resp.Body, &http3); err != nil { + if err := DecodeBodyMap(resp.Body, &http3); err != nil { return nil, err } return http3, nil } // DisableProduct disables the specified product on the service. +// +// Deprecated: The 'Disable' functions in the product-specific packages +// should be used instead of this function. func (c *Client) DisableProduct(i *ProductEnablementInput) error { if i.ProductID == ProductUndefined { return ErrMissingProductID diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/purge.go b/vendor/github.com/fastly/go-fastly/v9/fastly/purge.go index 4113e4848..2fd70ebc1 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/purge.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/purge.go @@ -50,7 +50,7 @@ func (c *Client) Purge(i *PurgeInput) (*Purge, error) { defer resp.Body.Close() var r *Purge - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return nil, err } return r, nil @@ -113,7 +113,7 @@ func (c *Client) PurgeKey(i *PurgeKeyInput) (*Purge, error) { defer resp.Body.Close() var r *Purge - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return nil, err } return r, nil @@ -160,7 +160,7 @@ func (c *Client) PurgeKeys(i *PurgeKeysInput) (map[string]string, error) { defer resp.Body.Close() var r map[string]string - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return nil, err } return r, nil @@ -192,7 +192,7 @@ func (c *Client) PurgeAll(i *PurgeAllInput) (*Purge, error) { defer resp.Body.Close() var r *Purge - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return nil, err } return r, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/resource.go b/vendor/github.com/fastly/go-fastly/v9/fastly/resource.go index 4cc3bda62..b8c59b3ce 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/resource.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/resource.go @@ -55,7 +55,7 @@ func (c *Client) 
ListResources(i *ListResourcesInput) ([]*Resource, error) { defer resp.Body.Close() var rs []*Resource - if err := decodeBodyMap(resp.Body, &rs); err != nil { + if err := DecodeBodyMap(resp.Body, &rs); err != nil { return nil, err } return rs, nil @@ -95,7 +95,7 @@ func (c *Client) CreateResource(i *CreateResourceInput) (*Resource, error) { defer resp.Body.Close() var r *Resource - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return nil, err } return r, nil @@ -131,7 +131,7 @@ func (c *Client) GetResource(i *GetResourceInput) (*Resource, error) { defer resp.Body.Close() var r *Resource - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return nil, err } return r, nil @@ -169,7 +169,7 @@ func (c *Client) UpdateResource(i *UpdateResourceInput) (*Resource, error) { defer resp.Body.Close() var b *Resource - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -205,7 +205,7 @@ func (c *Client) DeleteResource(i *DeleteResourceInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/service_details.go b/vendor/github.com/fastly/go-fastly/v9/fastly/service_details.go index a0490e904..ad6a46e2c 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/service_details.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/service_details.go @@ -125,7 +125,7 @@ func (c *Client) CreateService(i *CreateServiceInput) (*Service, error) { defer resp.Body.Close() var s *Service - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -154,7 +154,7 @@ func (c *Client) GetService(i *GetServiceInput) (*Service, error) { defer resp.Body.Close() var s *Service - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } @@ -190,7 +190,7 @@ func (c *Client) GetServiceDetails(i *GetServiceInput) (*ServiceDetail, error) { defer resp.Body.Close() var s *ServiceDetail - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } @@ -222,7 +222,7 @@ func (c *Client) UpdateService(i *UpdateServiceInput) (*Service, error) { defer resp.Body.Close() var s *Service - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } return s, nil @@ -249,7 +249,7 @@ func (c *Client) DeleteService(i *DeleteServiceInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { @@ -283,7 +283,7 @@ func (c *Client) SearchService(i *SearchServiceInput) (*Service, error) { defer resp.Body.Close() var s *Service - if err := decodeBodyMap(resp.Body, &s); err != nil { + if err := DecodeBodyMap(resp.Body, &s); err != nil { return nil, err } @@ -313,7 +313,7 @@ func (c *Client) ListServiceDomains(i *ListServiceDomainInput) (ServiceDomainsLi var ds ServiceDomainsList - if err := decodeBodyMap(resp.Body, &ds); err != nil { + if err := DecodeBodyMap(resp.Body, &ds); err != nil { return nil, err } diff --git 
a/vendor/github.com/fastly/go-fastly/v9/fastly/service_version.go b/vendor/github.com/fastly/go-fastly/v9/fastly/service_version.go index 817668de5..9cce3df60 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/service_version.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/service_version.go @@ -49,7 +49,7 @@ func (c *Client) ListVersions(i *ListVersionsInput) ([]*Version, error) { defer resp.Body.Close() var e []*Version - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } @@ -108,7 +108,7 @@ func (c *Client) CreateVersion(i *CreateVersionInput) (*Version, error) { defer resp.Body.Close() var e *Version - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } return e, nil @@ -139,7 +139,7 @@ func (c *Client) GetVersion(i *GetVersionInput) (*Version, error) { defer resp.Body.Close() var e *Version - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } return e, nil @@ -172,7 +172,7 @@ func (c *Client) UpdateVersion(i *UpdateVersionInput) (*Version, error) { defer resp.Body.Close() var e *Version - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } return e, nil @@ -211,7 +211,7 @@ func (c *Client) ActivateVersion(i *ActivateVersionInput) (*Version, error) { defer resp.Body.Close() var e *Version - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } return e, nil @@ -250,7 +250,7 @@ func (c *Client) DeactivateVersion(i *DeactivateVersionInput) (*Version, error) defer resp.Body.Close() var e *Version - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } return e, nil @@ -284,7 +284,7 @@ func (c *Client) CloneVersion(i *CloneVersionInput) (*Version, error) { defer resp.Body.Close() var e *Version - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } return e, nil @@ -317,7 +317,7 @@ func (c *Client) ValidateVersion(i *ValidateVersionInput) (bool, string, error) defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return false, msg, err } @@ -350,7 +350,7 @@ func (c *Client) LockVersion(i *LockVersionInput) (*Version, error) { defer resp.Body.Close() var e *Version - if err := decodeBodyMap(resp.Body, &e); err != nil { + if err := DecodeBodyMap(resp.Body, &e); err != nil { return nil, err } return e, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/stats_domain_inspector.go b/vendor/github.com/fastly/go-fastly/v9/fastly/stats_domain_inspector.go index d9478edfe..46e9e4ac3 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/stats_domain_inspector.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/stats_domain_inspector.go @@ -117,7 +117,7 @@ type GetDomainMetricsInput struct { End *time.Time // GroupBy is the dimensions to return in the query. GroupBy []string - // Limit is the limit of returned data + // Limit specifies the maximum number of entries to be returned Limit *int // Metrics is the metric to retrieve. Up to ten metrics are accepted. 
Metrics []string diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/stats_historical.go b/vendor/github.com/fastly/go-fastly/v9/fastly/stats_historical.go index d82e05465..1b047f120 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/stats_historical.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/stats_historical.go @@ -262,7 +262,7 @@ func (c *Client) GetUsage(i *GetUsageInput) (*UsageResponse, error) { defer resp.Body.Close() var sr *UsageResponse - if err := decodeBodyMap(resp.Body, &sr); err != nil { + if err := DecodeBodyMap(resp.Body, &sr); err != nil { return nil, err } @@ -309,7 +309,7 @@ func (c *Client) GetUsageByService(i *GetUsageInput) (*UsageByServiceResponse, e defer resp.Body.Close() var sr *UsageByServiceResponse - if err := decodeBodyMap(resp.Body, &sr); err != nil { + if err := DecodeBodyMap(resp.Body, &sr); err != nil { return nil, err } @@ -352,7 +352,7 @@ func (c *Client) GetAggregateJSON(i *GetAggregateInput, dst any) error { return err } - if err = decodeBodyMap(resp.Body, &dst); err != nil { + if err = DecodeBodyMap(resp.Body, &dst); err != nil { return err } @@ -376,7 +376,7 @@ func (c *Client) GetRegions() (*RegionsResponse, error) { defer resp.Body.Close() var rr *RegionsResponse - if err := decodeBodyMap(resp.Body, &rr); err != nil { + if err := DecodeBodyMap(resp.Body, &rr); err != nil { return nil, err } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/stats_origin_inspector.go b/vendor/github.com/fastly/go-fastly/v9/fastly/stats_origin_inspector.go index f724c8876..3943428fe 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/stats_origin_inspector.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/stats_origin_inspector.go @@ -91,6 +91,8 @@ type GetOriginMetricsInput struct { End *time.Time // GroupBy is the dimensions to return in the query. GroupBy []string + // Limit specifies the maximum number of entries to be returned + Limit *int // Hosts limits query to one or more specific origin hosts. Hosts []string // Metrics is the metric to retrieve. Up to ten metrics are accepted. 
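The two `stats_origin_inspector.go` hunks (the one above adds a `Limit *int` field to `GetOriginMetricsInput`; the one below wires it into the request parameters) suggest callers can now cap the number of entries returned per request. A minimal sketch of how a caller might use it is below; it assumes `fastly.NewClient` for client construction and omits the other `GetOriginMetricsInput` fields (service identifier, time range) that are not part of these hunks, and the metric and group-by names are illustrative only.

```go
package main

import (
	"log"
	"os"

	"github.com/fastly/go-fastly/v9/fastly"
)

func main() {
	client, err := fastly.NewClient(os.Getenv("FASTLY_API_KEY"))
	if err != nil {
		log.Fatal(err)
	}

	limit := 10
	input := &fastly.GetOriginMetricsInput{
		// Only fields visible in this diff are shown; the service identifier
		// and time-range fields required by the API are assumed to be set
		// elsewhere. Metric/group-by names are illustrative.
		Metrics: []string{"responses", "resp_body_bytes"},
		GroupBy: []string{"host"},
		Limit:   &limit, // new field: caps the number of entries returned
	}

	var out map[string]any // destination for the decoded JSON response
	if err := client.GetOriginMetricsForServiceJSON(input, &out); err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d top-level keys", len(out))
}
```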
@@ -146,6 +148,9 @@ func (c *Client) GetOriginMetricsForServiceJSON(i *GetOriginMetricsInput, dst an if i.Start != nil { ro.Params["start"] = strconv.FormatInt(i.Start.Unix(), 10) } + if i.Limit != nil { + ro.Params["limit"] = strconv.Itoa(*i.Limit) + } resp, err := c.Get(path, ro) if err != nil { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/tls_custom_activation.go b/vendor/github.com/fastly/go-fastly/v9/fastly/tls_custom_activation.go index e94222a0f..4e8d9699d 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/tls_custom_activation.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/tls_custom_activation.go @@ -48,15 +48,13 @@ func (i *ListTLSActivationsInput) formatFilters() map[string]string { } for key, value := range pairings { - switch t := reflect.TypeOf(value).String(); t { - case "string": - if value != "" { - v, _ := value.(string) // type assert to avoid runtime panic (v will have zero value for its type) + switch v := value.(type) { + case string: + if v != "" { result[key] = v } - case "int": - if value != 0 { - v, _ := value.(int) // type assert to avoid runtime panic (v will have zero value for its type) + case int: + if v != 0 { result[key] = strconv.Itoa(v) } } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/tls_custom_configuration.go b/vendor/github.com/fastly/go-fastly/v9/fastly/tls_custom_configuration.go index 0b310ba82..8300fdad5 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/tls_custom_configuration.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/tls_custom_configuration.go @@ -52,15 +52,13 @@ func (i *ListCustomTLSConfigurationsInput) formatFilters() map[string]string { } for key, value := range pairings { - switch t := reflect.TypeOf(value).String(); t { - case "string": - if value != "" { - v, _ := value.(string) // type assert to avoid runtime panic (v will have zero value for its type) + switch v := value.(type) { + case string: + if v != "" { result[key] = v } - case "int": - if value != 0 { - v, _ := value.(int) // type assert to avoid runtime panic (v will have zero value for its type) + case int: + if v != 0 { result[key] = strconv.Itoa(v) } } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/tls_private_keys.go b/vendor/github.com/fastly/go-fastly/v9/fastly/tls_private_keys.go index 2fcee28a2..ad9762e16 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/tls_private_keys.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/tls_private_keys.go @@ -47,15 +47,13 @@ func (i *ListPrivateKeysInput) formatFilters() map[string]string { } for key, value := range pairings { - switch t := reflect.TypeOf(value).String(); t { - case "string": - if value != "" { - v, _ := value.(string) // type assert to avoid runtime panic (v will have zero value for its type) + switch v := value.(type) { + case string: + if v != "" { result[key] = v } - case "int": - if value != 0 { - v, _ := value.(int) // type assert to avoid runtime panic (v will have zero value for its type) + case int: + if v != 0 { result[key] = strconv.Itoa(v) } } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/token.go b/vendor/github.com/fastly/go-fastly/v9/fastly/token.go index d6dec3a7f..2f87ca244 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/token.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/token.go @@ -48,7 +48,7 @@ func (c *Client) ListTokens(_ *ListTokensInput) ([]*Token, error) { defer resp.Body.Close() var t []*Token - if err := decodeBodyMap(resp.Body, &t); err != nil { + if err := 
DecodeBodyMap(resp.Body, &t); err != nil { return nil, err } return t, nil @@ -75,7 +75,7 @@ func (c *Client) ListCustomerTokens(i *ListCustomerTokensInput) ([]*Token, error defer resp.Body.Close() var t []*Token - if err := decodeBodyMap(resp.Body, &t); err != nil { + if err := DecodeBodyMap(resp.Body, &t); err != nil { return nil, err } return t, nil @@ -93,7 +93,7 @@ func (c *Client) GetTokenSelf() (*Token, error) { defer resp.Body.Close() var t *Token - if err := decodeBodyMap(resp.Body, &t); err != nil { + if err := DecodeBodyMap(resp.Body, &t); err != nil { return nil, err } @@ -130,7 +130,7 @@ func (c *Client) CreateToken(i *CreateTokenInput) (*Token, error) { defer resp.Body.Close() var t *Token - if err := decodeBodyMap(resp.Body, &t); err != nil { + if err := DecodeBodyMap(resp.Body, &t); err != nil { return nil, err } return t, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/user.go b/vendor/github.com/fastly/go-fastly/v9/fastly/user.go index 3355d172c..c32d13a7c 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/user.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/user.go @@ -44,7 +44,7 @@ func (c *Client) ListCustomerUsers(i *ListCustomerUsersInput) ([]*User, error) { defer resp.Body.Close() var u []*User - if err := decodeBodyMap(resp.Body, &u); err != nil { + if err := DecodeBodyMap(resp.Body, &u); err != nil { return nil, err } return u, nil @@ -59,7 +59,7 @@ func (c *Client) GetCurrentUser() (*User, error) { defer resp.Body.Close() var u *User - if err := decodeBodyMap(resp.Body, &u); err != nil { + if err := DecodeBodyMap(resp.Body, &u); err != nil { return nil, err } @@ -89,7 +89,7 @@ func (c *Client) GetUser(i *GetUserInput) (*User, error) { defer resp.Body.Close() var u *User - if err := decodeBodyMap(resp.Body, &u); err != nil { + if err := DecodeBodyMap(resp.Body, &u); err != nil { return nil, err } @@ -115,7 +115,7 @@ func (c *Client) CreateUser(i *CreateUserInput) (*User, error) { defer resp.Body.Close() var u *User - if err := decodeBodyMap(resp.Body, &u); err != nil { + if err := DecodeBodyMap(resp.Body, &u); err != nil { return nil, err } return u, nil @@ -146,7 +146,7 @@ func (c *Client) UpdateUser(i *UpdateUserInput) (*User, error) { defer resp.Body.Close() var u *User - if err := decodeBodyMap(resp.Body, &u); err != nil { + if err := DecodeBodyMap(resp.Body, &u); err != nil { return nil, err } return u, nil @@ -173,7 +173,7 @@ func (c *Client) DeleteUser(i *DeleteUserInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { @@ -203,7 +203,7 @@ func (c *Client) ResetUserPassword(i *ResetUserPasswordInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_cache_setting.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_cache_setting.go index 27ac409d3..9ccccf04f 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_cache_setting.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_cache_setting.go @@ -58,7 +58,7 @@ func (c *Client) ListCacheSettings(i *ListCacheSettingsInput) ([]*CacheSetting, defer resp.Body.Close() var cs []*CacheSetting - if err := decodeBodyMap(resp.Body, &cs); err != nil { + if err := DecodeBodyMap(resp.Body, &cs); err != nil { return nil, err } return cs, nil @@ -99,7 
+99,7 @@ func (c *Client) CreateCacheSetting(i *CreateCacheSettingInput) (*CacheSetting, defer resp.Body.Close() var cs *CacheSetting - if err := decodeBodyMap(resp.Body, &cs); err != nil { + if err := DecodeBodyMap(resp.Body, &cs); err != nil { return nil, err } return cs, nil @@ -135,7 +135,7 @@ func (c *Client) GetCacheSetting(i *GetCacheSettingInput) (*CacheSetting, error) defer resp.Body.Close() var cs *CacheSetting - if err := decodeBodyMap(resp.Body, &cs); err != nil { + if err := DecodeBodyMap(resp.Body, &cs); err != nil { return nil, err } return cs, nil @@ -181,7 +181,7 @@ func (c *Client) UpdateCacheSetting(i *UpdateCacheSettingInput) (*CacheSetting, defer resp.Body.Close() var cs *CacheSetting - if err := decodeBodyMap(resp.Body, &cs); err != nil { + if err := DecodeBodyMap(resp.Body, &cs); err != nil { return nil, err } return cs, nil @@ -217,7 +217,7 @@ func (c *Client) DeleteCacheSetting(i *DeleteCacheSettingInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_condition.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_condition.go index 307060b28..ec0a3c6e3 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_condition.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_condition.go @@ -44,7 +44,7 @@ func (c *Client) ListConditions(i *ListConditionsInput) ([]*Condition, error) { defer resp.Body.Close() var cs []*Condition - if err := decodeBodyMap(resp.Body, &cs); err != nil { + if err := DecodeBodyMap(resp.Body, &cs); err != nil { return nil, err } return cs, nil @@ -83,7 +83,7 @@ func (c *Client) CreateCondition(i *CreateConditionInput) (*Condition, error) { defer resp.Body.Close() var co *Condition - if err := decodeBodyMap(resp.Body, &co); err != nil { + if err := DecodeBodyMap(resp.Body, &co); err != nil { return nil, err } return co, nil @@ -119,7 +119,7 @@ func (c *Client) GetCondition(i *GetConditionInput) (*Condition, error) { defer resp.Body.Close() var co *Condition - if err := decodeBodyMap(resp.Body, &co); err != nil { + if err := DecodeBodyMap(resp.Body, &co); err != nil { return nil, err } return co, nil @@ -163,7 +163,7 @@ func (c *Client) UpdateCondition(i *UpdateConditionInput) (*Condition, error) { defer resp.Body.Close() var co *Condition - if err := decodeBodyMap(resp.Body, &co); err != nil { + if err := DecodeBodyMap(resp.Body, &co); err != nil { return nil, err } return co, nil @@ -199,7 +199,7 @@ func (c *Client) DeleteCondition(i *DeleteConditionInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_custom.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_custom.go index 8fd6f612a..0a30a48ff 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_custom.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_custom.go @@ -42,7 +42,7 @@ func (c *Client) ListVCLs(i *ListVCLsInput) ([]*VCL, error) { defer resp.Body.Close() var vcls []*VCL - if err := decodeBodyMap(resp.Body, &vcls); err != nil { + if err := DecodeBodyMap(resp.Body, &vcls); err != nil { return nil, err } return vcls, nil @@ -78,7 +78,7 @@ func (c *Client) GetVCL(i *GetVCLInput) (*VCL, error) { defer resp.Body.Close() var vcl *VCL - if err := 
decodeBodyMap(resp.Body, &vcl); err != nil { + if err := DecodeBodyMap(resp.Body, &vcl); err != nil { return nil, err } return vcl, nil @@ -109,7 +109,7 @@ func (c *Client) GetGeneratedVCL(i *GetGeneratedVCLInput) (*VCL, error) { defer resp.Body.Close() var vcl *VCL - if err := decodeBodyMap(resp.Body, &vcl); err != nil { + if err := DecodeBodyMap(resp.Body, &vcl); err != nil { return nil, err } return vcl, nil @@ -146,7 +146,7 @@ func (c *Client) CreateVCL(i *CreateVCLInput) (*VCL, error) { defer resp.Body.Close() var vcl *VCL - if err := decodeBodyMap(resp.Body, &vcl); err != nil { + if err := DecodeBodyMap(resp.Body, &vcl); err != nil { return nil, err } return vcl, nil @@ -186,7 +186,7 @@ func (c *Client) UpdateVCL(i *UpdateVCLInput) (*VCL, error) { defer resp.Body.Close() var vcl *VCL - if err := decodeBodyMap(resp.Body, &vcl); err != nil { + if err := DecodeBodyMap(resp.Body, &vcl); err != nil { return nil, err } return vcl, nil @@ -222,7 +222,7 @@ func (c *Client) ActivateVCL(i *ActivateVCLInput) (*VCL, error) { defer resp.Body.Close() var vcl *VCL - if err := decodeBodyMap(resp.Body, &vcl); err != nil { + if err := DecodeBodyMap(resp.Body, &vcl); err != nil { return nil, err } return vcl, nil @@ -258,7 +258,7 @@ func (c *Client) DeleteVCL(i *DeleteVCLInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_gzip.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_gzip.go index 31f534cb2..12732a9f8 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_gzip.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_gzip.go @@ -43,7 +43,7 @@ func (c *Client) ListGzips(i *ListGzipsInput) ([]*Gzip, error) { defer resp.Body.Close() var gzips []*Gzip - if err := decodeBodyMap(resp.Body, &gzips); err != nil { + if err := DecodeBodyMap(resp.Body, &gzips); err != nil { return nil, err } return gzips, nil @@ -82,7 +82,7 @@ func (c *Client) CreateGzip(i *CreateGzipInput) (*Gzip, error) { defer resp.Body.Close() var gzip *Gzip - if err := decodeBodyMap(resp.Body, &gzip); err != nil { + if err := DecodeBodyMap(resp.Body, &gzip); err != nil { return nil, err } return gzip, nil @@ -118,7 +118,7 @@ func (c *Client) GetGzip(i *GetGzipInput) (*Gzip, error) { defer resp.Body.Close() var b *Gzip - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -162,7 +162,7 @@ func (c *Client) UpdateGzip(i *UpdateGzipInput) (*Gzip, error) { defer resp.Body.Close() var b *Gzip - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -198,7 +198,7 @@ func (c *Client) DeleteGzip(i *DeleteGzipInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_header.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_header.go index da50779ff..e9f4a6155 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_header.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_header.go @@ -94,7 +94,7 @@ func (c *Client) ListHeaders(i *ListHeadersInput) ([]*Header, error) { defer resp.Body.Close() var bs []*Header - if err := decodeBodyMap(resp.Body, 
&bs); err != nil { + if err := DecodeBodyMap(resp.Body, &bs); err != nil { return nil, err } return bs, nil @@ -149,7 +149,7 @@ func (c *Client) CreateHeader(i *CreateHeaderInput) (*Header, error) { defer resp.Body.Close() var b *Header - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -185,7 +185,7 @@ func (c *Client) GetHeader(i *GetHeaderInput) (*Header, error) { defer resp.Body.Close() var b *Header - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -245,7 +245,7 @@ func (c *Client) UpdateHeader(i *UpdateHeaderInput) (*Header, error) { defer resp.Body.Close() var b *Header - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -281,7 +281,7 @@ func (c *Client) DeleteHeader(i *DeleteHeaderInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_http3.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_http3.go index 4b942da82..0c89e6536 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_http3.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_http3.go @@ -40,7 +40,7 @@ func (c *Client) GetHTTP3(i *GetHTTP3Input) (*HTTP3, error) { defer resp.Body.Close() var h *HTTP3 - if err := decodeBodyMap(resp.Body, &h); err != nil { + if err := DecodeBodyMap(resp.Body, &h); err != nil { return nil, err } @@ -74,7 +74,7 @@ func (c *Client) EnableHTTP3(i *EnableHTTP3Input) (*HTTP3, error) { defer resp.Body.Close() var http3 *HTTP3 - if err := decodeBodyMap(resp.Body, &http3); err != nil { + if err := DecodeBodyMap(resp.Body, &http3); err != nil { return nil, err } return http3, nil @@ -105,7 +105,7 @@ func (c *Client) DisableHTTP3(i *DisableHTTP3Input) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_rate_limiter.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_rate_limiter.go index a9258f81c..58e61aee6 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_rate_limiter.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_rate_limiter.go @@ -207,7 +207,7 @@ func (c *Client) ListERLs(i *ListERLsInput) ([]*ERL, error) { defer resp.Body.Close() var erls []*ERL - if err := decodeBodyMap(resp.Body, &erls); err != nil { + if err := DecodeBodyMap(resp.Body, &erls); err != nil { return nil, err } @@ -263,7 +263,7 @@ func (c *Client) CreateERL(i *CreateERLInput) (*ERL, error) { defer resp.Body.Close() var erl *ERL - if err := decodeBodyMap(resp.Body, &erl); err != nil { + if err := DecodeBodyMap(resp.Body, &erl); err != nil { return nil, err } @@ -291,7 +291,7 @@ func (c *Client) DeleteERL(i *DeleteERLInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { @@ -322,7 +322,7 @@ func (c *Client) GetERL(i *GetERLInput) (*ERL, error) { defer resp.Body.Close() var erl *ERL - if err := decodeBodyMap(resp.Body, &erl); err != nil { + if err := DecodeBodyMap(resp.Body, &erl); err != nil 
{ return nil, err } @@ -374,7 +374,7 @@ func (c *Client) UpdateERL(i *UpdateERLInput) (*ERL, error) { defer resp.Body.Close() var erl *ERL - if err := decodeBodyMap(resp.Body, &erl); err != nil { + if err := DecodeBodyMap(resp.Body, &erl); err != nil { return nil, err } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_request_setting.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_request_setting.go index d8f678099..90b46871f 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_request_setting.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_request_setting.go @@ -87,7 +87,7 @@ func (c *Client) ListRequestSettings(i *ListRequestSettingsInput) ([]*RequestSet defer resp.Body.Close() var bs []*RequestSetting - if err := decodeBodyMap(resp.Body, &bs); err != nil { + if err := DecodeBodyMap(resp.Body, &bs); err != nil { return nil, err } return bs, nil @@ -143,7 +143,7 @@ func (c *Client) CreateRequestSetting(i *CreateRequestSettingInput) (*RequestSet defer resp.Body.Close() var b *RequestSetting - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -179,7 +179,7 @@ func (c *Client) GetRequestSetting(i *GetRequestSettingInput) (*RequestSetting, defer resp.Body.Close() var b *RequestSetting - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -240,7 +240,7 @@ func (c *Client) UpdateRequestSetting(i *UpdateRequestSettingInput) (*RequestSet defer resp.Body.Close() var b *RequestSetting - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -276,7 +276,7 @@ func (c *Client) DeleteRequestSetting(i *DeleteRequestSettingInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_response_object.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_response_object.go index c6b2e657b..a1d428330 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_response_object.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_response_object.go @@ -47,7 +47,7 @@ func (c *Client) ListResponseObjects(i *ListResponseObjectsInput) ([]*ResponseOb defer resp.Body.Close() var bs []*ResponseObject - if err := decodeBodyMap(resp.Body, &bs); err != nil { + if err := DecodeBodyMap(resp.Body, &bs); err != nil { return nil, err } return bs, nil @@ -93,7 +93,7 @@ func (c *Client) CreateResponseObject(i *CreateResponseObjectInput) (*ResponseOb defer resp.Body.Close() var b *ResponseObject - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -129,7 +129,7 @@ func (c *Client) GetResponseObject(i *GetResponseObjectInput) (*ResponseObject, defer resp.Body.Close() var b *ResponseObject - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -180,7 +180,7 @@ func (c *Client) UpdateResponseObject(i *UpdateResponseObjectInput) (*ResponseOb defer resp.Body.Close() var b *ResponseObject - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -216,7 +216,7 @@ func (c 
*Client) DeleteResponseObject(i *DeleteResponseObjectInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_settings.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_settings.go index 61c1feb01..e43cbf996 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_settings.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_settings.go @@ -37,7 +37,7 @@ func (c *Client) GetSettings(i *GetSettingsInput) (*Settings, error) { defer resp.Body.Close() var b *Settings - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil @@ -76,7 +76,7 @@ func (c *Client) UpdateSettings(i *UpdateSettingsInput) (*Settings, error) { defer resp.Body.Close() var b *Settings - if err := decodeBodyMap(resp.Body, &b); err != nil { + if err := DecodeBodyMap(resp.Body, &b); err != nil { return nil, err } return b, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_snippets.go b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_snippets.go index 288391610..85227f030 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_snippets.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/vcl_snippets.go @@ -93,7 +93,7 @@ func (c *Client) CreateSnippet(i *CreateSnippetInput) (*Snippet, error) { defer resp.Body.Close() var snippet *Snippet - if err := decodeBodyMap(resp.Body, &snippet); err != nil { + if err := DecodeBodyMap(resp.Body, &snippet); err != nil { return nil, err } return snippet, err @@ -137,7 +137,7 @@ func (c *Client) UpdateSnippet(i *UpdateSnippetInput) (*Snippet, error) { defer resp.Body.Close() var snippet *Snippet - if err := decodeBodyMap(resp.Body, &snippet); err != nil { + if err := DecodeBodyMap(resp.Body, &snippet); err != nil { return nil, err } return snippet, err @@ -181,7 +181,7 @@ func (c *Client) UpdateDynamicSnippet(i *UpdateDynamicSnippetInput) (*DynamicSni defer resp.Body.Close() var updateSnippet *DynamicSnippet - if err := decodeBodyMap(resp.Body, &updateSnippet); err != nil { + if err := DecodeBodyMap(resp.Body, &updateSnippet); err != nil { return nil, err } return updateSnippet, err @@ -217,7 +217,7 @@ func (c *Client) DeleteSnippet(i *DeleteSnippetInput) error { defer resp.Body.Close() var r *statusResp - if err := decodeBodyMap(resp.Body, &r); err != nil { + if err := DecodeBodyMap(resp.Body, &r); err != nil { return err } if !r.Ok() { @@ -254,7 +254,7 @@ func (c *Client) ListSnippets(i *ListSnippetsInput) ([]*Snippet, error) { defer resp.Body.Close() var snippets []*Snippet - if err := decodeBodyMap(resp.Body, &snippets); err != nil { + if err := DecodeBodyMap(resp.Body, &snippets); err != nil { return nil, err } return snippets, nil @@ -293,7 +293,7 @@ func (c *Client) GetSnippet(i *GetSnippetInput) (*Snippet, error) { defer resp.Body.Close() var snippet *Snippet - if err := decodeBodyMap(resp.Body, &snippet); err != nil { + if err := DecodeBodyMap(resp.Body, &snippet); err != nil { return nil, err } return snippet, nil @@ -327,7 +327,7 @@ func (c *Client) GetDynamicSnippet(i *GetDynamicSnippetInput) (*DynamicSnippet, defer resp.Body.Close() var snippet *DynamicSnippet - if err := decodeBodyMap(resp.Body, &snippet); err != nil { + if err := DecodeBodyMap(resp.Body, &snippet); err != nil { return nil, err } return snippet, nil diff --git 
a/vendor/github.com/fastly/go-fastly/v9/fastly/waf.go b/vendor/github.com/fastly/go-fastly/v9/fastly/waf.go index 664fd0c12..1b396b932 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/waf.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/waf.go @@ -71,15 +71,13 @@ func (i *ListWAFsInput) formatFilters() map[string]string { } for key, value := range pairings { - switch t := reflect.TypeOf(value).String(); t { - case "string": - if value != "" { - v, _ := value.(string) // type assert to avoid runtime panic (v will have zero value for its type) + switch v := value.(type) { + case string: + if v != "" { result[key] = v } - case "int": - if value != 0 { - v, _ := value.(int) // type assert to avoid runtime panic (v will have zero value for its type) + case int: + if v != 0 { result[key] = strconv.Itoa(v) } } diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/waf_rule_exclusion.go b/vendor/github.com/fastly/go-fastly/v9/fastly/waf_rule_exclusion.go index c285e3851..231509ca5 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/waf_rule_exclusion.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/waf_rule_exclusion.go @@ -284,7 +284,7 @@ func (c *Client) UpdateWAFRuleExclusion(i *UpdateWAFRuleExclusionInput) (*WAFRul defer resp.Body.Close() var exc *WAFRuleExclusion - if err := decodeBodyMap(resp.Body, &exc); err != nil { + if err := DecodeBodyMap(resp.Body, &exc); err != nil { return nil, err } return exc, nil diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/waf_rules.go b/vendor/github.com/fastly/go-fastly/v9/fastly/waf_rules.go index dc27d9749..c79b4aff1 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/waf_rules.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/waf_rules.go @@ -74,24 +74,20 @@ func (i *ListWAFRulesInput) formatFilters() map[string]string { } for key, value := range pairings { - switch t := reflect.TypeOf(value).String(); t { - case "string": - if value != "" { - v, _ := value.(string) // type assert to avoid runtime panic (v will have zero value for its type) + switch v := value.(type) { + case string: + if v != "" { result[key] = v } - case "int": - if value != 0 { - v, _ := value.(int) // type assert to avoid runtime panic (v will have zero value for its type) + case int: + if v != 0 { result[key] = strconv.Itoa(v) } - case "[]string": - v, _ := value.([]string) // type assert to avoid runtime panic (v will have zero value for its type) + case []string: if len(v) > 0 { result[key] = strings.Join(v, ",") } - case "[]int": - v, _ := value.([]int) // type assert to avoid runtime panic (v will have zero value for its type) + case []int: if len(v) > 0 { stringSlice := make([]string, len(v)) for i, id := range v { diff --git a/vendor/github.com/fastly/go-fastly/v9/fastly/waf_version.go b/vendor/github.com/fastly/go-fastly/v9/fastly/waf_version.go index 82de17640..29ed0a728 100644 --- a/vendor/github.com/fastly/go-fastly/v9/fastly/waf_version.go +++ b/vendor/github.com/fastly/go-fastly/v9/fastly/waf_version.go @@ -111,15 +111,13 @@ func (i *ListWAFVersionsInput) formatFilters() map[string]string { } for key, value := range pairings { - switch t := reflect.TypeOf(value).String(); t { - case "string": - if value != "" { - v, _ := value.(string) // type assert to avoid runtime panic (v will have zero value for its type) + switch v := value.(type) { + case string: + if v != "" { result[key] = v } - case "int": - if value != 0 { - v, _ := value.(int) // type assert to avoid runtime panic (v will have zero value for its type) + case int: 
+ if v != 0 { result[key] = strconv.Itoa(v) } } diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go index f7bedda38..ac67205bc 100644 --- a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -5,8 +5,6 @@ import ( "unicode" ) -const nbsp = 0xA0 - // WrapString wraps the given string within lim width in characters. // // Wrapping is currently naive and only happens at white-space. A future @@ -20,58 +18,50 @@ func WrapString(s string, lim uint) string { var current uint var wordBuf, spaceBuf bytes.Buffer - var wordBufLen, spaceBufLen uint for _, char := range s { if char == '\n' { if wordBuf.Len() == 0 { - if current+spaceBufLen > lim { + if current+uint(spaceBuf.Len()) > lim { current = 0 } else { - current += spaceBufLen + current += uint(spaceBuf.Len()) spaceBuf.WriteTo(buf) } spaceBuf.Reset() - spaceBufLen = 0 } else { - current += spaceBufLen + wordBufLen + current += uint(spaceBuf.Len() + wordBuf.Len()) spaceBuf.WriteTo(buf) spaceBuf.Reset() - spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() - wordBufLen = 0 } buf.WriteRune(char) current = 0 - } else if unicode.IsSpace(char) && char != nbsp { + } else if unicode.IsSpace(char) { if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { - current += spaceBufLen + wordBufLen + current += uint(spaceBuf.Len() + wordBuf.Len()) spaceBuf.WriteTo(buf) spaceBuf.Reset() - spaceBufLen = 0 wordBuf.WriteTo(buf) wordBuf.Reset() - wordBufLen = 0 } spaceBuf.WriteRune(char) - spaceBufLen++ } else { + wordBuf.WriteRune(char) - wordBufLen++ - if current+wordBufLen+spaceBufLen > lim && wordBufLen < lim { + if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { buf.WriteRune('\n') current = 0 spaceBuf.Reset() - spaceBufLen = 0 } } } if wordBuf.Len() == 0 { - if current+spaceBufLen <= lim { + if current+uint(spaceBuf.Len()) <= lim { spaceBuf.WriteTo(buf) } } else { diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml new file mode 100644 index 000000000..362bdd41c --- /dev/null +++ b/vendor/github.com/oklog/run/.travis.yml @@ -0,0 +1,12 @@ +language: go +sudo: false +go: + - 1.x + - tip +install: + - go get -v github.com/golang/lint/golint + - go build ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md index eba7d11cf..a7228cd9a 100644 --- a/vendor/github.com/oklog/run/README.md +++ b/vendor/github.com/oklog/run/README.md @@ -1,7 +1,7 @@ # run [![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) -[![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Foklog%2Frun%2Fbadge&style=flat-square&label=build)](https://github.com/oklog/run/actions?query=workflow%3ATest) +[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) [![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) [![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) @@ -10,11 +10,9 @@ run.Group is a universal mechanism to manage goroutine lifecycles. Create a zero-value run.Group, and then add actors to it. 
Actors are defined as a pair of functions: an **execute** function, which should run synchronously; and an **interrupt** function, which, when invoked, should cause the execute -function to return. Finally, invoke Run, which concurrently runs all of the -actors, waits until the first actor exits, invokes the interrupt functions, and -finally returns control to the caller only once all actors have returned. This -general-purpose API allows callers to model pretty much any runnable task, and -achieve well-defined lifecycle semantics for the group. +function to return. Finally, invoke Run, which blocks until the first actor +returns. This general-purpose API allows callers to model pretty much any +runnable task, and achieve well-defined lifecycle semantics for the group. run.Group was written to manage component lifecycles in func main for [OK Log](https://github.com/oklog/oklog). diff --git a/vendor/github.com/oklog/run/actors.go b/vendor/github.com/oklog/run/actors.go deleted file mode 100644 index ef93495d3..000000000 --- a/vendor/github.com/oklog/run/actors.go +++ /dev/null @@ -1,38 +0,0 @@ -package run - -import ( - "context" - "fmt" - "os" - "os/signal" -) - -// SignalHandler returns an actor, i.e. an execute and interrupt func, that -// terminates with SignalError when the process receives one of the provided -// signals, or the parent context is canceled. -func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() error, interrupt func(error)) { - ctx, cancel := context.WithCancel(ctx) - return func() error { - c := make(chan os.Signal, 1) - signal.Notify(c, signals...) - select { - case sig := <-c: - return SignalError{Signal: sig} - case <-ctx.Done(): - return ctx.Err() - } - }, func(error) { - cancel() - } -} - -// SignalError is returned by the signal handler's execute function -// when it terminates due to a received signal. -type SignalError struct { - Signal os.Signal -} - -// Error implements the error interface. -func (e SignalError) Error() string { - return fmt.Sprintf("received signal %s", e.Signal) -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index c0d1c29e6..5b5ccd96f 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -149,7 +149,7 @@ func (s *ServerConfig) AddHostKey(key Signer) { } // cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. +// acceptable for a user. This is a FIFO cache. type cachedPubKey struct { user string pubKeyData []byte @@ -157,7 +157,13 @@ type cachedPubKey struct { perms *Permissions } -const maxCachedPubKeys = 16 +// maxCachedPubKeys is the number of cache entries we store. +// +// Due to consistent misuse of the PublicKeyCallback API, we have reduced this +// to 1, such that the only key in the cache is the most recently seen one. This +// forces the behavior that the last call to PublicKeyCallback will always be +// with the key that is used for authentication. +const maxCachedPubKeys = 1 // pubKeyCache caches tests for public keys. Since SSH clients // will query whether a public key is acceptable before attempting to @@ -179,9 +185,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { // add adds the given tuple to the cache. 
func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) + if len(c.keys) >= maxCachedPubKeys { + c.keys = c.keys[1:] } + c.keys = append(c.keys, candidate) } // ServerConn is an authenticated SSH connection, as seen from the diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index 220568259..de1b98211 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -226,8 +226,9 @@ func (x *FileSyntax) Cleanup() { continue } if ww == 1 && len(stmt.RParen.Comments.Before) == 0 { - // Collapse block into single line. - line := &Line{ + // Collapse block into single line but keep the Line reference used by the + // parsed File structure. + *stmt.Line[0] = Line{ Comments: Comments{ Before: commentsAdd(stmt.Before, stmt.Line[0].Before), Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), @@ -235,7 +236,7 @@ func (x *FileSyntax) Cleanup() { }, Token: stringsAdd(stmt.Token, stmt.Line[0].Token), } - x.Stmt[w] = line + x.Stmt[w] = stmt.Line[0] w++ continue } diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index 0e7b7e267..3e4a1d0ab 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -38,10 +38,12 @@ type File struct { Module *Module Go *Go Toolchain *Toolchain + Godebug []*Godebug Require []*Require Exclude []*Exclude Replace []*Replace Retract []*Retract + Tool []*Tool Syntax *FileSyntax } @@ -65,6 +67,13 @@ type Toolchain struct { Syntax *Line } +// A Godebug is a single godebug key=value statement. +type Godebug struct { + Key string + Value string + Syntax *Line +} + // An Exclude is a single exclude statement. type Exclude struct { Mod module.Version @@ -85,6 +94,12 @@ type Retract struct { Syntax *Line } +// A Tool is a single tool statement. +type Tool struct { + Path string + Syntax *Line +} + // A VersionInterval represents a range of versions with upper and lower bounds. // Intervals are closed: both bounds are included. When Low is equal to High, // the interval may refer to a single version ('v1.2.3') or an interval @@ -289,7 +304,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse }) } continue - case "module", "require", "exclude", "replace", "retract": + case "module", "godebug", "require", "exclude", "replace", "retract", "tool": for _, l := range x.Line { f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) } @@ -308,7 +323,9 @@ var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9]. 
// Toolchains must be named beginning with `go1`, // like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted. -// TODO(samthanawalla): Replace regex with https://pkg.go.dev/go/version#IsValid in 1.23+ +// Note that this regexp is a much looser condition than go/version.IsValid, +// for forward compatibility. +// (This code has to be work to identify new toolchains even if we tweak the syntax in the future.) var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`) func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { @@ -384,7 +401,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a if len(args) != 1 { errorf("toolchain directive expects exactly one argument") return - } else if strict && !ToolchainRE.MatchString(args[0]) { + } else if !ToolchainRE.MatchString(args[0]) { errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) return } @@ -412,6 +429,22 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a } f.Module.Mod = module.Version{Path: s} + case "godebug": + if len(args) != 1 || strings.ContainsAny(args[0], "\"`',") { + errorf("usage: godebug key=value") + return + } + key, value, ok := strings.Cut(args[0], "=") + if !ok { + errorf("usage: godebug key=value") + return + } + f.Godebug = append(f.Godebug, &Godebug{ + Key: key, + Value: value, + Syntax: line, + }) + case "require", "exclude": if len(args) != 2 { errorf("usage: %s module/path v1.2.3", verb) @@ -483,6 +516,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a Syntax: line, } f.Retract = append(f.Retract, retract) + + case "tool": + if len(args) != 1 { + errorf("tool directive expects exactly one argument") + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + f.Tool = append(f.Tool, &Tool{ + Path: s, + Syntax: line, + }) } } @@ -654,6 +702,22 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, f.Toolchain = &Toolchain{Syntax: line} f.Toolchain.Name = args[0] + case "godebug": + if len(args) != 1 || strings.ContainsAny(args[0], "\"`',") { + errorf("usage: godebug key=value") + return + } + key, value, ok := strings.Cut(args[0], "=") + if !ok { + errorf("usage: godebug key=value") + return + } + f.Godebug = append(f.Godebug, &Godebug{ + Key: key, + Value: value, + Syntax: line, + }) + case "use": if len(args) != 1 { errorf("usage: %s local/dir", verb) @@ -929,6 +993,15 @@ func (f *File) Format() ([]byte, error) { // Cleanup cleans out all the cleared entries. func (f *File) Cleanup() { w := 0 + for _, g := range f.Godebug { + if g.Key != "" { + f.Godebug[w] = g + w++ + } + } + f.Godebug = f.Godebug[:w] + + w = 0 for _, r := range f.Require { if r.Mod.Path != "" { f.Require[w] = r @@ -1027,6 +1100,45 @@ func (f *File) AddToolchainStmt(name string) error { return nil } +// AddGodebug sets the first godebug line for key to value, +// preserving any existing comments for that line and removing all +// other godebug lines for key. +// +// If no line currently exists for key, AddGodebug adds a new line +// at the end of the last godebug block. 
+func (f *File) AddGodebug(key, value string) error { + need := true + for _, g := range f.Godebug { + if g.Key == key { + if need { + g.Value = value + f.Syntax.updateLine(g.Syntax, "godebug", key+"="+value) + need = false + } else { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + } + + if need { + f.addNewGodebug(key, value) + } + return nil +} + +// addNewGodebug adds a new godebug key=value line at the end +// of the last godebug block, regardless of any existing godebug lines for key. +func (f *File) addNewGodebug(key, value string) { + line := f.Syntax.addLine(nil, "godebug", key+"="+value) + g := &Godebug{ + Key: key, + Value: value, + Syntax: line, + } + f.Godebug = append(f.Godebug, g) +} + // AddRequire sets the first require line for path to version vers, // preserving any existing comments for that line and removing all // other lines for path. @@ -1334,6 +1446,16 @@ func (f *File) SetRequireSeparateIndirect(req []*Require) { f.SortBlocks() } +func (f *File) DropGodebug(key string) error { + for _, g := range f.Godebug { + if g.Key == key { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + return nil +} + func (f *File) DropRequire(path string) error { for _, r := range f.Require { if r.Mod.Path == path { @@ -1467,6 +1589,36 @@ func (f *File) DropRetract(vi VersionInterval) error { return nil } +// AddTool adds a new tool directive with the given path. +// It does nothing if the tool line already exists. +func (f *File) AddTool(path string) error { + for _, t := range f.Tool { + if t.Path == path { + return nil + } + } + + f.Tool = append(f.Tool, &Tool{ + Path: path, + Syntax: f.Syntax.addLine(nil, "tool", path), + }) + + f.SortBlocks() + return nil +} + +// RemoveTool removes a tool directive with the given path. +// It does nothing if no such tool directive exists. +func (f *File) DropTool(path string) error { + for _, t := range f.Tool { + if t.Path == path { + t.Syntax.markRemoved() + *t = Tool{} + } + } + return nil +} + func (f *File) SortBlocks() { f.removeDups() // otherwise sorting is unsafe @@ -1493,9 +1645,9 @@ func (f *File) SortBlocks() { } } -// removeDups removes duplicate exclude and replace directives. +// removeDups removes duplicate exclude, replace and tool directives. // -// Earlier exclude directives take priority. +// Earlier exclude and tool directives take priority. // // Later replace directives take priority. // @@ -1505,10 +1657,10 @@ func (f *File) SortBlocks() { // retract directives are not de-duplicated since comments are // meaningful, and versions may be retracted multiple times. func (f *File) removeDups() { - removeDups(f.Syntax, &f.Exclude, &f.Replace) + removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool) } -func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) { +func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool) { kill := make(map[*Line]bool) // Remove duplicate excludes. @@ -1549,6 +1701,24 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) { } *replace = repl + if tool != nil { + haveTool := make(map[string]bool) + for _, t := range *tool { + if haveTool[t.Path] { + kill[t.Syntax] = true + continue + } + haveTool[t.Path] = true + } + var newTool []*Tool + for _, t := range *tool { + if !kill[t.Syntax] { + newTool = append(newTool, t) + } + } + *tool = newTool + } + // Duplicate require and retract directives are not removed. // Drop killed statements from the syntax tree. 
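The modfile changes above add first-class handling for the new `godebug` and `tool` directives: parsing in File.add and WorkFile.add, Cleanup and removeDups support, and the AddGodebug/DropGodebug and AddTool/DropTool mutators. A minimal sketch of driving those new entry points through the package's public API follows; the module path, godebug key, and tool path are illustrative values, not taken from this change set.

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	// Hypothetical go.mod contents; any module path works for this sketch.
	src := []byte("module example.com/demo\n\ngo 1.22\n")

	f, err := modfile.Parse("go.mod", src, nil) // nil VersionFixer: leave versions as written
	if err != nil {
		log.Fatal(err)
	}

	// New mutators introduced by this vendor bump.
	if err := f.AddGodebug("gotypesalias", "1"); err != nil {
		log.Fatal(err)
	}
	if err := f.AddTool("golang.org/x/tools/cmd/stringer"); err != nil {
		log.Fatal(err)
	}

	out, err := f.Format()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}

On this input the formatted output gains a `godebug gotypesalias=1` line and a `tool golang.org/x/tools/cmd/stringer` line; their exact placement is decided by the package's block and sorting rules, so treat the sketch as a behavioral illustration rather than a golden output.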
diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go index d7b99376e..5387d0c26 100644 --- a/vendor/golang.org/x/mod/modfile/work.go +++ b/vendor/golang.org/x/mod/modfile/work.go @@ -14,6 +14,7 @@ import ( type WorkFile struct { Go *Go Toolchain *Toolchain + Godebug []*Godebug Use []*Use Replace []*Replace @@ -68,7 +69,7 @@ func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), }) continue - case "use", "replace": + case "godebug", "use", "replace": for _, l := range x.Line { f.add(&errs, l, x.Token[0], l.Token, fix) } @@ -184,6 +185,55 @@ func (f *WorkFile) DropToolchainStmt() { } } +// AddGodebug sets the first godebug line for key to value, +// preserving any existing comments for that line and removing all +// other godebug lines for key. +// +// If no line currently exists for key, AddGodebug adds a new line +// at the end of the last godebug block. +func (f *WorkFile) AddGodebug(key, value string) error { + need := true + for _, g := range f.Godebug { + if g.Key == key { + if need { + g.Value = value + f.Syntax.updateLine(g.Syntax, "godebug", key+"="+value) + need = false + } else { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + } + + if need { + f.addNewGodebug(key, value) + } + return nil +} + +// addNewGodebug adds a new godebug key=value line at the end +// of the last godebug block, regardless of any existing godebug lines for key. +func (f *WorkFile) addNewGodebug(key, value string) { + line := f.Syntax.addLine(nil, "godebug", key+"="+value) + g := &Godebug{ + Key: key, + Value: value, + Syntax: line, + } + f.Godebug = append(f.Godebug, g) +} + +func (f *WorkFile) DropGodebug(key string) error { + for _, g := range f.Godebug { + if g.Key == key { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + return nil +} + func (f *WorkFile) AddUse(diskPath, modulePath string) error { need := true for _, d := range f.Use { @@ -281,5 +331,5 @@ func (f *WorkFile) SortBlocks() { // retract directives are not de-duplicated since comments are // meaningful, and versions may be retracted multiple times. func (f *WorkFile) removeDups() { - removeDups(f.Syntax, nil, &f.Replace) + removeDups(f.Syntax, nil, &f.Replace, nil) } diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index de58dfb8d..ca645d9a1 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go index e3784123c..5b516c55f 100644 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b279..81faec7e7 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 7688c356b..c7601c909 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,10 +34,11 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -50,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -141,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 832414b45..b55547aec 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -932,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) 
} + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1801,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2231,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2259,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2285,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2326,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f5968f440..b2e2ed337 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -368,25 +368,27 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // 
whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + closedOnIdle bool // true if conn was closed for idleness + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -396,6 +398,17 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool // pendingResets is the number of RST_STREAM frames we have sent to the peer, // without confirming that the peer has received them. When we send a RST_STREAM, @@ -819,6 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, @@ -1076,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). 
+ // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) // // This avoids a situation where an error early in a connection's lifetime // goes unreported. - if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { st.canTakeNewRequest = true } @@ -1142,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1466,6 +1483,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1481,12 +1500,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1714,10 +1752,14 @@ func (cs *clientStream) cleanupWriteRequest(err error) { ping := false if !closeOnIdle { cc.mu.Lock() - if cc.pendingResets == 0 { - ping = true + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ } - cc.pendingResets++ cc.mu.Unlock() } cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) @@ -2030,7 +2072,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -2046,6 +2088,10 @@ func validateHeaders(hdrs http.Header) string { var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. 
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -2066,7 +2112,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -2103,7 +2149,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2392,9 +2438,12 @@ func (rl *clientConnReadLoop) cleanup() { // This avoids a situation where new connections are constantly created, // added to the pool, fail, and are removed from the pool, without any error // being surfaced to the user. - const unusedWaitTime = 5 * time.Second + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } idleTime := cc.t.now().Sub(cc.lastActive) - if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) @@ -2461,7 +2510,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2507,13 +2556,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2842,7 +2894,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2977,9 +3029,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. 
+ rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -3073,6 +3138,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -3090,6 +3170,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -3098,7 +3179,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3127,7 +3208,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3205,6 +3286,7 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if cc.pendingResets > 0 { // See clientStream.cleanupWriteRequest. 
cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true cc.cond.Broadcast() } return nil diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2..be8c00207 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index ccba391c9..6ebc48b3f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -489,6 +492,7 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 @@ -1166,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1799,6 +1804,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1924,6 +1931,7 @@ const ( MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2970,6 +2978,7 @@ const ( RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 0c00cb3f3..c0d45e320 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -297,6 +298,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -335,6 +338,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dfb364554..c731d24f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go 
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -298,6 +299,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -336,6 +339,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index d46dcf78a..680018a4a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -303,6 +304,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -341,6 +344,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 3af3248a7..a63909f30 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -205,6 +206,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -294,6 +296,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -332,6 +336,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 292bcf028..9b0a2573f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -290,6 +291,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -328,6 +331,9 @@ const ( SO_CNX_ADVICE = 
0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 782b7110f..958e6e064 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 84973fd92..50c7f25bd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 6d9cbc3b2..ced21d66d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 5f9fedbce..226c04419 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 
0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index bb0026ee0..3122737cd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -351,6 +352,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -389,6 +392,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 46120db5c..eb5d3467e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5c951634f..e921ebc60 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 11a84d5af..38ba81c55 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 
HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -287,6 +288,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -325,6 +328,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f78c4617c..71f040097 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -359,6 +360,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -397,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index aeb777c34..c44a31332 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -350,6 +351,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c @@ -436,6 +439,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d003c3d43..17c53bd9b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate 
uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 0d45a941a..2392226a7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8daaf3faf..5537148dc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2594,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3541,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3802,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2c + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_MSG_KERNEL_MAX = 0x2e ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 
ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3850,7 +3850,7 @@ const ( ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -4031,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4200,7 +4200,8 @@ type ( } PtpSysOffsetExtended struct { Samples uint32 - Rsv [3]uint32 + Clockid int32 + Rsv [2]uint32 Ts [25][3]PtpClockTime } PtpSysOffsetPrecise struct { @@ -4399,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf63..3ca814f54 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. 
func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 4510bfc3f..4a3254386 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -168,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 51311e205..9d138de5f 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. 
+ PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 6f5252880..01c0716c2 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -280,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -1612,7 +1614,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1652,7 +1654,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1660,7 +1662,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1672,7 +1674,7 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1684,7 +1686,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -2446,6 +2448,14 @@ func GetModuleHandleEx(flags 
uint32, moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { @@ -2462,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/cmd/stringer/gotypesalias.go b/vendor/golang.org/x/tools/cmd/stringer/gotypesalias.go new file mode 100644 index 000000000..288c10c2d --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/stringer/gotypesalias.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +//go:debug gotypesalias=1 + +package main + +// Materialize aliases whenever the go toolchain version is after 1.23 (#69772). +// Remove this file after go.mod >= 1.23 (which implies gotypesalias=1). diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go index 2b19c93e8..09be11ca5 100644 --- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go +++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -58,6 +58,11 @@ // where t is the lower-cased name of the first type listed. It can be overridden // with the -output flag. // +// Types can also be declared in tests, in which case type declarations in the +// non-test package or its test variant are preferred over types defined in the +// package with suffix "_test". 
+// The default output file for type declarations in tests is t_string_test.go with t picked as above. +// // The -linecomment flag tells stringer to generate the text of any line comment, trimmed // of leading spaces, instead of the constant name. For instance, if the constants above had a // Pill prefix, one could write @@ -128,10 +133,6 @@ func main() { // Parse the package once. var dir string - g := Generator{ - trimPrefix: *trimprefix, - lineComment: *linecomment, - } // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc). if len(args) == 1 && isDirectory(args[0]) { dir = args[0] @@ -142,33 +143,90 @@ func main() { dir = filepath.Dir(args[0]) } - g.parsePackage(args, tags) + // For each type, generate code in the first package where the type is declared. + // The order of packages is as follows: + // package x + // package x compiled for tests + // package x_test + // + // Each package pass could result in a separate generated file. + // These files must have the same package and test/not-test nature as the types + // from which they were generated. + // + // Types will be excluded when generated, to avoid repetitions. + pkgs := loadPackages(args, tags, *trimprefix, *linecomment, nil /* logf */) + sort.Slice(pkgs, func(i, j int) bool { + // Put x_test packages last. + iTest := strings.HasSuffix(pkgs[i].name, "_test") + jTest := strings.HasSuffix(pkgs[j].name, "_test") + if iTest != jTest { + return !iTest + } - // Print the header and package clause. - g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) - g.Printf("\n") - g.Printf("package %s", g.pkg.name) - g.Printf("\n") - g.Printf("import \"strconv\"\n") // Used by all methods. + return len(pkgs[i].files) < len(pkgs[j].files) + }) + for _, pkg := range pkgs { + g := Generator{ + pkg: pkg, + } - // Run generate for each type. - for _, typeName := range types { - g.generate(typeName) + // Print the header and package clause. + g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) + g.Printf("\n") + g.Printf("package %s", g.pkg.name) + g.Printf("\n") + g.Printf("import \"strconv\"\n") // Used by all methods. + + // Run generate for types that can be found. Keep the rest for the remainingTypes iteration. + var foundTypes, remainingTypes []string + for _, typeName := range types { + values := findValues(typeName, pkg) + if len(values) > 0 { + g.generate(typeName, values) + foundTypes = append(foundTypes, typeName) + } else { + remainingTypes = append(remainingTypes, typeName) + } + } + if len(foundTypes) == 0 { + // This package didn't have any of the relevant types, skip writing a file. + continue + } + if len(remainingTypes) > 0 && output != nil && *output != "" { + log.Fatalf("cannot write to single file (-output=%q) when matching types are found in multiple packages", *output) + } + types = remainingTypes + + // Format the output. + src := g.format() + + // Write to file. + outputName := *output + if outputName == "" { + // Type names will be unique across packages since only the first + // match is picked. + // So there won't be collisions between a package compiled for tests + // and the separate package of tests (package foo_test). + outputName = filepath.Join(dir, baseName(pkg, foundTypes[0])) + } + err := os.WriteFile(outputName, src, 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } } - // Format the output. - src := g.format() - - // Write to file. 
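The stringer documentation change above says the tool can now generate String methods for types declared only in a package's tests, writing them to a *_string_test.go file. A hedged usage sketch under that reading; the package and type names are invented:

// phase_test.go (hypothetical)
package mypkg

//go:generate stringer -type=Phase

// Phase exists only in the test variant of mypkg, so with the updated
// stringer the generated method should land in phase_string_test.go
// rather than in a non-test file.
type Phase int

const (
	PhaseInit Phase = iota
	PhaseRun
	PhaseDone
)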
- outputName := *output - if outputName == "" { - baseName := fmt.Sprintf("%s_string.go", types[0]) - outputName = filepath.Join(dir, strings.ToLower(baseName)) + if len(types) > 0 { + log.Fatalf("no values defined for types: %s", strings.Join(types, ",")) } - err := os.WriteFile(outputName, src, 0644) - if err != nil { - log.Fatalf("writing output: %s", err) +} + +// baseName that will put the generated code together with pkg. +func baseName(pkg *Package, typename string) string { + suffix := "string.go" + if pkg.hasTestFiles { + suffix = "string_test.go" } + return fmt.Sprintf("%s_%s", strings.ToLower(typename), suffix) } // isDirectory reports whether the named file is a directory. @@ -186,9 +244,6 @@ type Generator struct { buf bytes.Buffer // Accumulated output. pkg *Package // Package we are scanning. - trimPrefix string - lineComment bool - logf func(format string, args ...interface{}) // test logging hook; nil when not testing } @@ -209,54 +264,74 @@ type File struct { } type Package struct { - name string - defs map[*ast.Ident]types.Object - files []*File + name string + defs map[*ast.Ident]types.Object + files []*File + hasTestFiles bool } -// parsePackage analyzes the single package constructed from the patterns and tags. -// parsePackage exits if there is an error. -func (g *Generator) parsePackage(patterns []string, tags []string) { +// loadPackages analyzes the single package constructed from the patterns and tags. +// loadPackages exits if there is an error. +// +// Returns all variants (such as tests) of the package. +// +// logf is a test logging hook. It can be nil when not testing. +func loadPackages( + patterns, tags []string, + trimPrefix string, lineComment bool, + logf func(format string, args ...interface{}), +) []*Package { cfg := &packages.Config{ - Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, - // TODO: Need to think about constants in test files. Maybe write type_string_test.go - // in a separate pass? For later. - Tests: false, + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedFiles, + // Tests are included, let the caller decide how to fold them in. + Tests: true, BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, - Logf: g.logf, + Logf: logf, } pkgs, err := packages.Load(cfg, patterns...) if err != nil { log.Fatal(err) } - if len(pkgs) != 1 { - log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " ")) + if len(pkgs) == 0 { + log.Fatalf("error: no packages matching %v", strings.Join(patterns, " ")) } - g.addPackage(pkgs[0]) -} -// addPackage adds a type checked Package and its syntax files to the generator. -func (g *Generator) addPackage(pkg *packages.Package) { - g.pkg = &Package{ - name: pkg.Name, - defs: pkg.TypesInfo.Defs, - files: make([]*File, len(pkg.Syntax)), - } + out := make([]*Package, len(pkgs)) + for i, pkg := range pkgs { + p := &Package{ + name: pkg.Name, + defs: pkg.TypesInfo.Defs, + files: make([]*File, len(pkg.Syntax)), + } + + for j, file := range pkg.Syntax { + p.files[j] = &File{ + file: file, + pkg: p, - for i, file := range pkg.Syntax { - g.pkg.files[i] = &File{ - file: file, - pkg: g.pkg, - trimPrefix: g.trimPrefix, - lineComment: g.lineComment, + trimPrefix: trimPrefix, + lineComment: lineComment, + } } + + // Keep track of test files, since we might want to generated + // code that ends up in that kind of package. + // Can be replaced once https://go.dev/issue/38445 lands. 
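loadPackages above now asks go/packages for test variants as well (Tests: true plus NeedFiles). Roughly equivalent standalone usage, assuming the working directory contains a module; the pattern and the printed fields are only illustrative:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedSyntax |
			packages.NeedTypes | packages.NeedTypesInfo,
		Tests: true, // also return the package compiled for tests and any foo_test package
	}
	pkgs, err := packages.Load(cfg, ".")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		// Typically: mypkg, mypkg [mypkg.test], and mypkg_test.
		fmt.Println(p.ID, p.Name)
	}
}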
+ for _, f := range pkg.GoFiles { + if strings.HasSuffix(f, "_test.go") { + p.hasTestFiles = true + break + } + } + + out[i] = p } + return out } -// generate produces the String method for the named type. -func (g *Generator) generate(typeName string) { +func findValues(typeName string, pkg *Package) []Value { values := make([]Value, 0, 100) - for _, file := range g.pkg.files { + for _, file := range pkg.files { // Set the state for this run of the walker. file.typeName = typeName file.values = nil @@ -265,10 +340,11 @@ func (g *Generator) generate(typeName string) { values = append(values, file.values...) } } + return values +} - if len(values) == 0 { - log.Fatalf("no values defined for type %s", typeName) - } +// generate produces the String method for the named type. +func (g *Generator) generate(typeName string, values []Value) { // Generate code that will fail if the constants change value. g.Printf("func _() {\n") g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n") diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index 521177365..3a73084a5 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -50,7 +50,7 @@ type Analyzer struct { // RunDespiteErrors allows the driver to invoke // the Run method of this analyzer even on a // package that contains parse or type errors. - // The Pass.TypeErrors field may consequently be non-empty. + // The [Pass.TypeErrors] field may consequently be non-empty. RunDespiteErrors bool // Requires is a set of analyzers that must run successfully @@ -91,7 +91,7 @@ type Pass struct { Analyzer *Analyzer // the identity of the current analyzer // syntax and type information - Fset *token.FileSet // file position information + Fset *token.FileSet // file position information; Run may add new files Files []*ast.File // the abstract syntax tree of each file OtherFiles []string // names of non-Go files of this package IgnoredFiles []string // names of ignored source files in this package @@ -100,6 +100,8 @@ type Pass struct { TypesSizes types.Sizes // function for computing sizes of types TypeErrors []types.Error // type errors (only if Analyzer.RunDespiteErrors) + Module *Module // the package's enclosing module (possibly nil in some drivers) + // Report reports a Diagnostic, a finding about a specific location // in the analyzed source code such as a potential mistake. // It may be called by the Run function. @@ -154,10 +156,17 @@ type Pass struct { // AllPackageFacts returns a new slice containing all package // facts of the analysis's FactTypes in unspecified order. + // See comments for AllObjectFacts. AllPackageFacts func() []PackageFact // AllObjectFacts returns a new slice containing all object // facts of the analysis's FactTypes in unspecified order. + // + // The result includes all facts exported by packages + // whose symbols are referenced by the current package + // (by qualified identifiers or field/method selections). + // And it includes all facts exported from the current + // package by the current analysis pass. AllObjectFacts func() []ObjectFact /* Further fields may be added in future. */ @@ -238,3 +247,10 @@ func (pass *Pass) String() string { type Fact interface { AFact() // dummy method to avoid type errors } + +// A Module describes the module to which a package belongs. 
+type Module struct { + Path string // module path + Version string // module version ("" if unknown, such as for workspace modules) + GoVersion string // go version used in module (e.g. "go1.22.0") +} diff --git a/vendor/golang.org/x/tools/go/analysis/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/checker/checker.go new file mode 100644 index 000000000..5935a62ab --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/checker/checker.go @@ -0,0 +1,625 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package checker provides an analysis driver based on the +// [golang.org/x/tools/go/packages] representation of a set of +// packages and all their dependencies, as produced by +// [packages.Load]. +// +// It is the core of multichecker (the multi-analyzer driver), +// singlechecker (the single-analyzer driver often used to provide a +// convenient command alongside each analyzer), and analysistest, the +// test driver. +// +// By contrast, the 'go vet' command is based on unitchecker, an +// analysis driver that uses separate analysis--analogous to separate +// compilation--with file-based intermediate results. Like separate +// compilation, it is more scalable, especially for incremental +// analysis of large code bases. Commands based on multichecker and +// singlechecker are capable of detecting when they are being invoked +// by "go vet -vettool=exe" and instead dispatching to unitchecker. +// +// Programs built using this package will, in general, not be usable +// in that way. This package is intended only for use in applications +// that invoke the analysis driver as a subroutine, and need to insert +// additional steps before or after the analysis. +// +// See the Example of how to build a complete analysis driver program. +package checker + +import ( + "bytes" + "encoding/gob" + "fmt" + "go/types" + "io" + "log" + "reflect" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/analysisinternal" +) + +// Options specifies options that control the analysis driver. +type Options struct { + // These options correspond to existing flags exposed by multichecker: + Sequential bool // disable parallelism + SanityCheck bool // check fact encoding is ok and deterministic + FactLog io.Writer // if non-nil, log each exported fact to it + + // TODO(adonovan): add ReadFile so that an Overlay specified + // in the [packages.Config] can be communicated via + // Pass.ReadFile to each Analyzer. +} + +// Graph holds the results of a round of analysis, including the graph +// of requested actions (analyzers applied to packages) plus any +// dependent actions that it was necessary to compute. +type Graph struct { + // Roots contains the roots of the action graph. + // Each node (a, p) in the action graph represents the + // application of one analyzer a to one package p. + // (A node thus corresponds to one analysis.Pass instance.) + // Roots holds one action per element of the product + // of the analyzers Γ— packages arguments to Analyze, + // in unspecified order. + // + // Each element of Action.Deps represents an edge in the + // action graph: a dependency from one action to another. 
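The new Pass.Module field and Module type added above give analyzers access to the enclosing module when the driver supplies it. A toy analyzer sketch using the field; the analyzer name and message are invented, and Module may be nil in some drivers, as the field's comment notes:

package main

import (
	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/singlechecker"
)

var modinfo = &analysis.Analyzer{
	Name: "modinfo",
	Doc:  "reports each package's enclosing module and Go version",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		if pass.Module != nil && len(pass.Files) > 0 {
			pass.Reportf(pass.Files[0].Pos(), "module %s (go %s)",
				pass.Module.Path, pass.Module.GoVersion)
		}
		return nil, nil
	},
}

func main() { singlechecker.Main(modinfo) }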
+ // An edge of the form (a, p) -> (a, p2) indicates that the + // analysis of package p requires information ("facts") from + // the same analyzer applied to one of p's dependencies, p2. + // An edge of the form (a, p) -> (a2, p) indicates that the + // analysis of package p requires information ("results") + // from a different analyzer a2 applied to the same package. + // These two kind of edges are called "vertical" and "horizontal", + // respectively. + Roots []*Action +} + +// All returns an iterator over the action graph in depth-first postorder. +// +// Example: +// +// for act := range graph.All() { +// ... +// } +// +// Clients using go1.22 should iterate using the code below and may +// not assume anything else about the result: +// +// graph.All()(func (act *Action) bool { +// ... +// }) +func (g *Graph) All() actionSeq { + return func(yield func(*Action) bool) { + forEach(g.Roots, func(act *Action) error { + if !yield(act) { + return io.EOF // any error will do + } + return nil + }) + } +} + +// An Action represents one unit of analysis work by the driver: the +// application of one analysis to one package. It provides the inputs +// to and records the outputs of a single analysis.Pass. +// +// Actions form a DAG, both within a package (as different analyzers +// are applied, either in sequence or parallel), and across packages +// (as dependencies are analyzed). +type Action struct { + Analyzer *analysis.Analyzer + Package *packages.Package + IsRoot bool // whether this is a root node of the graph + Deps []*Action + Result any // computed result of Analyzer.run, if any (and if IsRoot) + Err error // error result of Analyzer.run + Diagnostics []analysis.Diagnostic + Duration time.Duration // execution time of this step + + opts *Options + once sync.Once + pass *analysis.Pass + objectFacts map[objectFactKey]analysis.Fact + packageFacts map[packageFactKey]analysis.Fact + inputs map[*analysis.Analyzer]any +} + +func (act *Action) String() string { + return fmt.Sprintf("%s@%s", act.Analyzer, act.Package) +} + +// Analyze runs the specified analyzers on the initial packages. +// +// The initial packages and all dependencies must have been loaded +// using the [packages.LoadAllSyntax] flag, Analyze may need to run +// some analyzer (those that consume and produce facts) on +// dependencies too. +// +// On success, it returns a Graph of actions whose Roots hold one +// item per (a, p) in the cross-product of analyzers and pkgs. +// +// If opts is nil, it is equivalent to new(Options). +func Analyze(analyzers []*analysis.Analyzer, pkgs []*packages.Package, opts *Options) (*Graph, error) { + if opts == nil { + opts = new(Options) + } + + if err := analysis.Validate(analyzers); err != nil { + return nil, err + } + + // Construct the action graph. + // + // Each graph node (action) is one unit of analysis. + // Edges express package-to-package (vertical) dependencies, + // and analysis-to-analysis (horizontal) dependencies. + type key struct { + a *analysis.Analyzer + pkg *packages.Package + } + actions := make(map[key]*Action) + + var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *Action + mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *Action { + k := key{a, pkg} + act, ok := actions[k] + if !ok { + act = &Action{Analyzer: a, Package: pkg, opts: opts} + + // Add a dependency on each required analyzers. 
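A sketch of a small driver built on the new public checker package introduced above, using the pre-go1.23 iteration style documented on Graph.All; the analyzer choice and package patterns are placeholders, and the final line uses PrintText, defined in print.go later in this diff:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/checker"
	"golang.org/x/tools/go/analysis/passes/printf"
	"golang.org/x/tools/go/packages"
)

func main() {
	// printf consumes facts, so load full syntax for dependencies too.
	cfg := &packages.Config{Mode: packages.LoadAllSyntax, Tests: true}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	graph, err := checker.Analyze([]*analysis.Analyzer{printf.Analyzer}, pkgs, nil)
	if err != nil {
		log.Fatal(err)
	}
	graph.All()(func(act *checker.Action) bool {
		if act.IsRoot {
			fmt.Printf("%s: %d diagnostics\n", act, len(act.Diagnostics))
		}
		return true
	})
	_ = graph.PrintText(os.Stderr, 0) // plain-text report with no context lines
}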
+ for _, req := range a.Requires { + act.Deps = append(act.Deps, mkAction(req, pkg)) + } + + // An analysis that consumes/produces facts + // must run on the package's dependencies too. + if len(a.FactTypes) > 0 { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // for determinism + for _, path := range paths { + dep := mkAction(a, pkg.Imports[path]) + act.Deps = append(act.Deps, dep) + } + } + + actions[k] = act + } + return act + } + + // Build nodes for initial packages. + var roots []*Action + for _, a := range analyzers { + for _, pkg := range pkgs { + root := mkAction(a, pkg) + root.IsRoot = true + roots = append(roots, root) + } + } + + // Execute the graph in parallel. + execAll(roots) + + // Ensure that only root Results are visible to caller. + // (The others are considered temporary intermediaries.) + // TODO(adonovan): opt: clear them earlier, so we can + // release large data structures like SSA sooner. + for _, act := range actions { + if !act.IsRoot { + act.Result = nil + } + } + + return &Graph{Roots: roots}, nil +} + +func init() { + // Allow analysistest to access Action.pass, + // for its legacy Result data type. + internal.Pass = func(x any) *analysis.Pass { return x.(*Action).pass } +} + +type objectFactKey struct { + obj types.Object + typ reflect.Type +} + +type packageFactKey struct { + pkg *types.Package + typ reflect.Type +} + +func execAll(actions []*Action) { + var wg sync.WaitGroup + for _, act := range actions { + wg.Add(1) + work := func(act *Action) { + act.exec() + wg.Done() + } + if act.opts.Sequential { + work(act) + } else { + go work(act) + } + } + wg.Wait() +} + +func (act *Action) exec() { act.once.Do(act.execOnce) } + +func (act *Action) execOnce() { + // Analyze dependencies. + execAll(act.Deps) + + // Record time spent in this node but not its dependencies. + // In parallel mode, due to GC/scheduler contention, the + // time is 5x higher than in sequential mode, even with a + // semaphore limiting the number of threads here. + // So use -debug=tp. + t0 := time.Now() + defer func() { act.Duration = time.Since(t0) }() + + // Report an error if any dependency failed. + var failed []string + for _, dep := range act.Deps { + if dep.Err != nil { + failed = append(failed, dep.String()) + } + } + if failed != nil { + sort.Strings(failed) + act.Err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + return + } + + // Plumb the output values of the dependencies + // into the inputs of this action. Also facts. + inputs := make(map[*analysis.Analyzer]any) + act.objectFacts = make(map[objectFactKey]analysis.Fact) + act.packageFacts = make(map[packageFactKey]analysis.Fact) + for _, dep := range act.Deps { + if dep.Package == act.Package { + // Same package, different analysis (horizontal edge): + // in-memory outputs of prerequisite analyzers + // become inputs to this analysis pass. + inputs[dep.Analyzer] = dep.Result + + } else if dep.Analyzer == act.Analyzer { // (always true) + // Same analysis, different package (vertical edge): + // serialized facts produced by prerequisite analysis + // become available to this analysis pass. + inheritFacts(act, dep) + } + } + + // Quick (nonexhaustive) check that the correct go/packages mode bits were used. + // (If there were errors, all bets are off.) 
+ if pkg := act.Package; pkg.Errors == nil { + if pkg.Name == "" || pkg.PkgPath == "" || pkg.Types == nil || pkg.Fset == nil || pkg.TypesSizes == nil { + panic("packages must be loaded with packages.LoadSyntax mode") + } + } + + module := &analysis.Module{} // possibly empty (non nil) in go/analysis drivers. + if mod := act.Package.Module; mod != nil { + module.Path = mod.Path + module.Version = mod.Version + module.GoVersion = mod.GoVersion + } + + // Run the analysis. + pass := &analysis.Pass{ + Analyzer: act.Analyzer, + Fset: act.Package.Fset, + Files: act.Package.Syntax, + OtherFiles: act.Package.OtherFiles, + IgnoredFiles: act.Package.IgnoredFiles, + Pkg: act.Package.Types, + TypesInfo: act.Package.TypesInfo, + TypesSizes: act.Package.TypesSizes, + TypeErrors: act.Package.TypeErrors, + Module: module, + + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.Diagnostics = append(act.Diagnostics, d) }, + ImportObjectFact: act.ObjectFact, + ExportObjectFact: act.exportObjectFact, + ImportPackageFact: act.PackageFact, + ExportPackageFact: act.exportPackageFact, + AllObjectFacts: act.AllObjectFacts, + AllPackageFacts: act.AllPackageFacts, + } + pass.ReadFile = analysisinternal.MakeReadFile(pass) + act.pass = pass + + act.Result, act.Err = func() (any, error) { + if act.Package.IllTyped && !pass.Analyzer.RunDespiteErrors { + return nil, fmt.Errorf("analysis skipped due to errors in package") + } + + result, err := pass.Analyzer.Run(pass) + if err != nil { + return nil, err + } + + // correct result type? + if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want { + return nil, fmt.Errorf( + "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", + pass.Pkg.Path(), pass.Analyzer, got, want) + } + + // resolve diagnostic URLs + for i := range act.Diagnostics { + url, err := analysisflags.ResolveURL(act.Analyzer, act.Diagnostics[i]) + if err != nil { + return nil, err + } + act.Diagnostics[i].URL = url + } + return result, nil + }() + + // Help detect (disallowed) calls after Run. + pass.ExportObjectFact = nil + pass.ExportPackageFact = nil +} + +// inheritFacts populates act.facts with +// those it obtains from its dependency, dep. +func inheritFacts(act, dep *Action) { + for key, fact := range dep.objectFacts { + // Filter out facts related to objects + // that are irrelevant downstream + // (equivalently: not in the compiler export data). + if !exportedFrom(key.obj, dep.Package.Types) { + if false { + log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact) + } + continue + } + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces. + if act.opts.SanityCheck { + encodedFact, err := codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + fact = encodedFact + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact) + } + act.objectFacts[key] = fact + } + + for key, fact := range dep.packageFacts { + // TODO: filter out facts that belong to + // packages not mentioned in the export data + // to prevent side channels. + // + // The Pass.All{Object,Package}Facts accessors expose too much: + // all facts, of all types, for all dependencies in the action + // graph. 
Not only does the representation grow quadratically, + // but it violates the separate compilation paradigm, allowing + // analysis implementations to communicate with indirect + // dependencies that are not mentioned in the export data. + // + // It's not clear how to fix this short of a rather expensive + // filtering step after each action that enumerates all the + // objects that would appear in export data, and deletes + // facts associated with objects not in this set. + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces + // and is deterministic. + if act.opts.SanityCheck { + encodedFact, err := codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + fact = encodedFact + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact) + } + act.packageFacts[key] = fact + } +} + +// codeFact encodes then decodes a fact, +// just to exercise that logic. +func codeFact(fact analysis.Fact) (analysis.Fact, error) { + // We encode facts one at a time. + // A real modular driver would emit all facts + // into one encoder to improve gob efficiency. + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(fact); err != nil { + return nil, err + } + + // Encode it twice and assert that we get the same bits. + // This helps detect nondeterministic Gob encoding (e.g. of maps). + var buf2 bytes.Buffer + if err := gob.NewEncoder(&buf2).Encode(fact); err != nil { + return nil, err + } + if !bytes.Equal(buf.Bytes(), buf2.Bytes()) { + return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact) + } + + new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact) + if err := gob.NewDecoder(&buf).Decode(new); err != nil { + return nil, err + } + return new, nil +} + +// exportedFrom reports whether obj may be visible to a package that imports pkg. +// This includes not just the exported members of pkg, but also unexported +// constants, types, fields, and methods, perhaps belonging to other packages, +// that find there way into the API. +// This is an overapproximation of the more accurate approach used by +// gc export data, which walks the type graph, but it's much simpler. +// +// TODO(adonovan): do more accurate filtering by walking the type graph. +func exportedFrom(obj types.Object, pkg *types.Package) bool { + switch obj := obj.(type) { + case *types.Func: + return obj.Exported() && obj.Pkg() == pkg || + obj.Type().(*types.Signature).Recv() != nil + case *types.Var: + if obj.IsField() { + return true + } + // we can't filter more aggressively than this because we need + // to consider function parameters exported, but have no way + // of telling apart function parameters from local variables. + return obj.Pkg() == pkg + case *types.TypeName, *types.Const: + return true + } + return false // Nil, Builtin, Label, or PkgName +} + +// ObjectFact retrieves a fact associated with obj, +// and returns true if one was found. +// Given a value ptr of type *T, where *T satisfies Fact, +// ObjectFact copies the value to *ptr. +// +// See documentation at ImportObjectFact field of [analysis.Pass]. 
+func (act *Action) ObjectFact(obj types.Object, ptr analysis.Fact) bool { + if obj == nil { + panic("nil object") + } + key := objectFactKey{obj, factType(ptr)} + if v, ok := act.objectFacts[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// exportObjectFact implements Pass.ExportObjectFact. +func (act *Action) exportObjectFact(obj types.Object, fact analysis.Fact) { + if act.pass.ExportObjectFact == nil { + log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact) + } + + if obj.Pkg() != act.Package.Types { + log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", + act.Analyzer, act.Package, obj, fact) + } + + key := objectFactKey{obj, factType(fact)} + act.objectFacts[key] = fact // clobber any existing entry + if log := act.opts.FactLog; log != nil { + objstr := types.ObjectString(obj, (*types.Package).Name) + fmt.Fprintf(log, "%s: object %s has fact %s\n", + act.Package.Fset.Position(obj.Pos()), objstr, fact) + } +} + +// AllObjectFacts returns a new slice containing all object facts of +// the analysis's FactTypes in unspecified order. +// +// See documentation at AllObjectFacts field of [analysis.Pass]. +func (act *Action) AllObjectFacts() []analysis.ObjectFact { + facts := make([]analysis.ObjectFact, 0, len(act.objectFacts)) + for k, fact := range act.objectFacts { + facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: fact}) + } + return facts +} + +// PackageFact retrieves a fact associated with package pkg, +// which must be this package or one of its dependencies. +// +// See documentation at ImportObjectFact field of [analysis.Pass]. +func (act *Action) PackageFact(pkg *types.Package, ptr analysis.Fact) bool { + if pkg == nil { + panic("nil package") + } + key := packageFactKey{pkg, factType(ptr)} + if v, ok := act.packageFacts[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// exportPackageFact implements Pass.ExportPackageFact. +func (act *Action) exportPackageFact(fact analysis.Fact) { + if act.pass.ExportPackageFact == nil { + log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact) + } + + key := packageFactKey{act.pass.Pkg, factType(fact)} + act.packageFacts[key] = fact // clobber any existing entry + if log := act.opts.FactLog; log != nil { + fmt.Fprintf(log, "%s: package %s has fact %s\n", + act.Package.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) + } +} + +func factType(fact analysis.Fact) reflect.Type { + t := reflect.TypeOf(fact) + if t.Kind() != reflect.Ptr { + log.Fatalf("invalid Fact type: got %T, want pointer", fact) + } + return t +} + +// AllPackageFacts returns a new slice containing all package +// facts of the analysis's FactTypes in unspecified order. +// +// See documentation at AllPackageFacts field of [analysis.Pass]. +func (act *Action) AllPackageFacts() []analysis.PackageFact { + facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) + for k, fact := range act.packageFacts { + facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: fact}) + } + return facts +} + +// forEach is a utility function for traversing the action graph. It +// applies function f to each action in the graph reachable from +// roots, in depth-first postorder. If any call to f returns an error, +// the traversal is aborted and ForEach returns the error. 
+func forEach(roots []*Action, f func(*Action) error) error { + seen := make(map[*Action]bool) + var visitAll func(actions []*Action) error + visitAll = func(actions []*Action) error { + for _, act := range actions { + if !seen[act] { + seen[act] = true + if err := visitAll(act.Deps); err != nil { + return err + } + if err := f(act); err != nil { + return err + } + } + } + return nil + } + return visitAll(roots) +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/go/analysis/checker/iter_go122.go similarity index 53% rename from vendor/golang.org/x/tools/internal/versions/toolchain_go121.go rename to vendor/golang.org/x/tools/go/analysis/checker/iter_go122.go index b7ef216df..cd25cce03 100644 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go +++ b/vendor/golang.org/x/tools/go/analysis/checker/iter_go122.go @@ -2,13 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.21 -// +build go1.21 +//go:build !go1.23 -package versions +package checker -func init() { - if Compare(toolchain, Go1_21) < 0 { - toolchain = Go1_21 - } -} +// This type is a placeholder for go1.23's iter.Seq[*Action]. +type actionSeq func(yield func(*Action) bool) diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/go/analysis/checker/iter_go123.go similarity index 55% rename from vendor/golang.org/x/tools/internal/versions/toolchain_go119.go rename to vendor/golang.org/x/tools/go/analysis/checker/iter_go123.go index f65beed9d..e8278a9c1 100644 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go +++ b/vendor/golang.org/x/tools/go/analysis/checker/iter_go123.go @@ -2,13 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.19 -// +build go1.19 +//go:build go1.23 -package versions +package checker -func init() { - if Compare(toolchain, Go1_19) < 0 { - toolchain = Go1_19 - } -} +import "iter" + +type actionSeq = iter.Seq[*Action] diff --git a/vendor/golang.org/x/tools/go/analysis/checker/print.go b/vendor/golang.org/x/tools/go/analysis/checker/print.go new file mode 100644 index 000000000..d7c043011 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/checker/print.go @@ -0,0 +1,88 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package checker + +// This file defines helpers for printing analysis results. +// They should all be pure functions. + +import ( + "bytes" + "fmt" + "go/token" + "io" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" +) + +// PrintText emits diagnostics as plain text to w. +// +// If contextLines is nonnegative, it also prints the +// offending line, plus that many lines of context +// before and after the line. +func (g *Graph) PrintText(w io.Writer, contextLines int) error { + return writeTextDiagnostics(w, g.Roots, contextLines) +} + +func writeTextDiagnostics(w io.Writer, roots []*Action, contextLines int) error { + // De-duplicate diagnostics by position (not token.Pos) to + // avoid double-reporting in source files that belong to + // multiple packages, such as foo and foo.test. + // (We cannot assume that such repeated files were parsed + // only once and use syntax nodes as the key.) 
+ type key struct { + pos token.Position + end token.Position + *analysis.Analyzer + message string + } + seen := make(map[key]bool) + + // TODO(adonovan): opt: plumb errors back from PrintPlain and avoid buffer. + buf := new(bytes.Buffer) + forEach(roots, func(act *Action) error { + if act.Err != nil { + fmt.Fprintf(w, "%s: %v\n", act.Analyzer.Name, act.Err) + } else if act.IsRoot { + for _, diag := range act.Diagnostics { + // We don't display Analyzer.Name/diag.Category + // as most users don't care. + + posn := act.Package.Fset.Position(diag.Pos) + end := act.Package.Fset.Position(diag.End) + k := key{posn, end, act.Analyzer, diag.Message} + if seen[k] { + continue // duplicate + } + seen[k] = true + + analysisflags.PrintPlain(buf, act.Package.Fset, contextLines, diag) + } + } + return nil + }) + _, err := w.Write(buf.Bytes()) + return err +} + +// PrintJSON emits diagnostics in JSON form to w. +// Diagnostics are shown only for the root nodes, +// but errors (if any) are shown for all dependencies. +func (g *Graph) PrintJSON(w io.Writer) error { + return writeJSONDiagnostics(w, g.Roots) +} + +func writeJSONDiagnostics(w io.Writer, roots []*Action) error { + tree := make(analysisflags.JSONTree) + forEach(roots, func(act *Action) error { + var diags []analysis.Diagnostic + if act.IsRoot { + diags = act.Diagnostics + } + tree.Add(act.Package.Fset, act.Package.ID, act.Analyzer.Name, diags, act.Err) + return nil + }) + return tree.Print(w) +} diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go index c638f2758..ee083a2d6 100644 --- a/vendor/golang.org/x/tools/go/analysis/diagnostic.go +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -12,7 +12,8 @@ import "go/token" // which should be a constant, may be used to classify them. // It is primarily intended to make it easy to look up documentation. // -// If End is provided, the diagnostic is specified to apply to the range between +// All Pos values are interpreted relative to Pass.Fset. If End is +// provided, the diagnostic is specified to apply to the range between // Pos and End. type Diagnostic struct { Pos token.Pos @@ -32,8 +33,17 @@ type Diagnostic struct { URL string // SuggestedFixes is an optional list of fixes to address the - // problem described by the diagnostic, each one representing + // problem described by the diagnostic. Each one represents // an alternative strategy; at most one may be applied. + // + // Fixes for different diagnostics should be treated as + // independent changes to the same baseline file state, + // analogous to a set of git commits all with the same parent. + // Combining fixes requires resolving any conflicts that + // arise, analogous to a git merge. + // Any conflicts that remain may be dealt with, depending on + // the tool, by discarding fixes, consulting the user, or + // aborting the operation. SuggestedFixes []SuggestedFix // Related contains optional secondary positions and messages @@ -57,8 +67,10 @@ type RelatedInformation struct { // // The TextEdits must not overlap, nor contain edits for other packages. type SuggestedFix struct { - // A description for this suggested fix to be shown to a user deciding - // whether to accept it. + // A verb phrase describing the fix, to be shown to + // a user trying to decide whether to accept it. 
+ // + // Example: "Remove the surplus argument" Message string TextEdits []TextEdit } diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go index ff14ff58f..c2445575c 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go @@ -250,21 +250,12 @@ const ( setFalse ) -func triStateFlag(name string, value triState, usage string) *triState { - flag.Var(&value, name, usage) - return &value -} - // triState implements flag.Value, flag.Getter, and flag.boolFlag. // They work like boolean flags: we can say vet -printf as well as vet -printf=true func (ts *triState) Get() interface{} { return *ts == setTrue } -func (ts triState) isTrue() bool { - return ts == setTrue -} - func (ts *triState) Set(value string) error { b, err := strconv.ParseBool(value) if err != nil { @@ -316,15 +307,22 @@ var vetLegacyFlags = map[string]string{ } // ---- output helpers common to all drivers ---- +// +// These functions should not depend on global state (flags)! +// Really they belong in a different package. + +// TODO(adonovan): don't accept an io.Writer if we don't report errors. +// Either accept a bytes.Buffer (infallible), or return a []byte. -// PrintPlain prints a diagnostic in plain text form, -// with context specified by the -c flag. -func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) { +// PrintPlain prints a diagnostic in plain text form. +// If contextLines is nonnegative, it also prints the +// offending line plus this many lines of context. +func PrintPlain(out io.Writer, fset *token.FileSet, contextLines int, diag analysis.Diagnostic) { posn := fset.Position(diag.Pos) - fmt.Fprintf(os.Stderr, "%s: %s\n", posn, diag.Message) + fmt.Fprintf(out, "%s: %s\n", posn, diag.Message) - // -c=N: show offending line plus N lines of context. - if Context >= 0 { + // show offending line plus N lines of context. + if contextLines >= 0 { posn := fset.Position(diag.Pos) end := fset.Position(diag.End) if !end.IsValid() { @@ -332,9 +330,9 @@ func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) { } data, _ := os.ReadFile(posn.Filename) lines := strings.Split(string(data), "\n") - for i := posn.Line - Context; i <= end.Line+Context; i++ { + for i := posn.Line - contextLines; i <= end.Line+contextLines; i++ { if 1 <= i && i <= len(lines) { - fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1]) + fmt.Fprintf(out, "%d\t%s\n", i, lines[i-1]) } } } @@ -438,10 +436,11 @@ func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis. } } -func (tree JSONTree) Print() { +func (tree JSONTree) Print(out io.Writer) error { data, err := json.MarshalIndent(tree, "", "\t") if err != nil { log.Panicf("internal error: JSON marshaling failed: %v", err) } - fmt.Printf("%s\n", data) + _, err = fmt.Fprintf(out, "%s\n", data) + return err } diff --git a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go index 52f0e55aa..0c2fc5e59 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go @@ -2,36 +2,37 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package checker defines the implementation of the checker commands. 
-// The same code drives the multi-analysis driver, the single-analysis -// driver that is conventionally provided for convenience along with -// each analysis package, and the test driver. +// Package internal/checker defines various implementation helpers for +// the singlechecker and multichecker packages, which provide the +// complete main function for an analysis driver executable +// based on go/packages. +// +// (Note: it is not used by the public 'checker' package, since the +// latter provides a set of pure functions for use as building blocks.) package checker +// TODO(adonovan): publish the JSON schema in go/analysis or analysisjson. + import ( - "bytes" - "encoding/gob" - "errors" "flag" "fmt" "go/format" "go/token" - "go/types" + "io" + "io/ioutil" "log" "os" - "reflect" "runtime" "runtime/pprof" "runtime/trace" "sort" "strings" - "sync" "time" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/checker" "golang.org/x/tools/go/analysis/internal/analysisflags" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/diff" "golang.org/x/tools/internal/robustio" ) @@ -79,7 +80,7 @@ func RegisterFlags() { // It provides most of the logic for the main functions of both the // singlechecker and the multi-analysis commands. // It returns the appropriate exit code. -func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) { +func Run(args []string, analyzers []*analysis.Analyzer) int { if CPUProfile != "" { f, err := os.Create(CPUProfile) if err != nil { @@ -134,231 +135,173 @@ func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) { allSyntax := needFacts(analyzers) initial, err := load(args, allSyntax) if err != nil { - if _, ok := err.(typeParseError); !ok { - // Fail when some of the errors are not - // related to parsing nor typing. - log.Print(err) - return 1 - } - // TODO: filter analyzers based on RunDespiteError? + log.Print(err) + return 1 + } + + pkgsExitCode := 0 + // Print package and module errors regardless of RunDespiteErrors. + // Do not exit if there are errors, yet. + if n := packages.PrintErrors(initial); n > 0 { + pkgsExitCode = 1 + } + + var factLog io.Writer + if dbg('f') { + factLog = os.Stderr } // Run the analysis. - roots := analyze(initial, analyzers) + opts := &checker.Options{ + SanityCheck: dbg('s'), + Sequential: dbg('p'), + FactLog: factLog, + } + if dbg('v') { + log.Printf("building graph of analysis passes") + } + graph, err := checker.Analyze(analyzers, initial, opts) + if err != nil { + log.Print(err) + return 1 + } - // Apply fixes. + // Apply all fixes from the root actions. if Fix { - if err := applyFixes(roots); err != nil { + if err := applyFixes(graph.Roots); err != nil { // Fail when applying fixes failed. log.Print(err) return 1 } } - // Print the results. - return printDiagnostics(roots) -} - -// typeParseError represents a package load error -// that is related to typing and parsing. -type typeParseError struct { - error -} - -// load loads the initial packages. If all loading issues are related to -// typing and parsing, the returned error is of type typeParseError. -func load(patterns []string, allSyntax bool) ([]*packages.Package, error) { - mode := packages.LoadSyntax - if allSyntax { - mode = packages.LoadAllSyntax - } - mode |= packages.NeedModule - conf := packages.Config{ - Mode: mode, - Tests: IncludeTests, - } - initial, err := packages.Load(&conf, patterns...) 
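The rewritten package doc above describes internal/checker as the implementation behind the singlechecker and multichecker commands. For context, a typical multichecker-based command looks roughly like this; the chosen passes are arbitrary:

package main

import (
	"golang.org/x/tools/go/analysis/multichecker"
	"golang.org/x/tools/go/analysis/passes/printf"
	"golang.org/x/tools/go/analysis/passes/shadow"
)

func main() {
	multichecker.Main(printf.Analyzer, shadow.Analyzer)
}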
- if err == nil { - if len(initial) == 0 { - err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " ")) - } else { - err = loadingError(initial) - } + // Print the results. If !RunDespiteErrors and there + // are errors in the packages, this will have 0 exit + // code. Otherwise, we prefer to return exit code + // indicating diagnostics. + if diagExitCode := printDiagnostics(graph); diagExitCode != 0 { + return diagExitCode // there were diagnostics } - return initial, err + return pkgsExitCode // package errors but no diagnostics } -// loadingError checks for issues during the loading of initial -// packages. Returns nil if there are no issues. Returns error -// of type typeParseError if all errors, including those in -// dependencies, are related to typing or parsing. Otherwise, -// a plain error is returned with an appropriate message. -func loadingError(initial []*packages.Package) error { - var err error - if n := packages.PrintErrors(initial); n > 1 { - err = fmt.Errorf("%d errors during loading", n) - } else if n == 1 { - err = errors.New("error during loading") +// printDiagnostics prints diagnostics in text or JSON form +// and returns the appropriate exit code. +func printDiagnostics(graph *checker.Graph) (exitcode int) { + // Print the results. + // With -json, the exit code is always zero. + if analysisflags.JSON { + if err := graph.PrintJSON(os.Stdout); err != nil { + return 1 + } } else { - // no errors - return nil - } - all := true - packages.Visit(initial, nil, func(pkg *packages.Package) { - for _, err := range pkg.Errors { - typeOrParse := err.Kind == packages.TypeError || err.Kind == packages.ParseError - all = all && typeOrParse + if err := graph.PrintText(os.Stderr, analysisflags.Context); err != nil { + return 1 } - }) - if all { - return typeParseError{err} - } - return err -} -// TestAnalyzer applies an analyzer to a set of packages (and their -// dependencies if necessary) and returns the results. -// The analyzer must be valid according to [analysis.Validate]. -// -// Facts about pkg are returned in a map keyed by object; package facts -// have a nil key. -// -// This entry point is used only by analysistest. -func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult { - var results []*TestAnalyzerResult - for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) { - facts := make(map[types.Object][]analysis.Fact) - for key, fact := range act.objectFacts { - if key.obj.Pkg() == act.pass.Pkg { - facts[key.obj] = append(facts[key.obj], fact) - } - } - for key, fact := range act.packageFacts { - if key.pkg == act.pass.Pkg { - facts[nil] = append(facts[nil], fact) + // Compute the exit code. + var numErrors, rootDiags int + // TODO(adonovan): use "for act := range graph.All() { ... }" in go1.23. + graph.All()(func(act *checker.Action) bool { + if act.Err != nil { + numErrors++ + } else if act.IsRoot { + rootDiags += len(act.Diagnostics) } + return true + }) + if numErrors > 0 { + exitcode = 1 // analysis failed, at least partially + } else if rootDiags > 0 { + exitcode = 3 // successfully produced diagnostics } - - results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err}) } - return results -} - -type TestAnalyzerResult struct { - Pass *analysis.Pass - Diagnostics []analysis.Diagnostic - Facts map[types.Object][]analysis.Fact - Result interface{} - Err error -} -func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action { - // Construct the action graph. 
- if dbg('v') { - log.Printf("building graph of analysis passes") - } + // Print timing info. + if dbg('t') { + if !dbg('p') { + log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism") + } - // Each graph node (action) is one unit of analysis. - // Edges express package-to-package (vertical) dependencies, - // and analysis-to-analysis (horizontal) dependencies. - type key struct { - *analysis.Analyzer - *packages.Package - } - actions := make(map[key]*action) - - var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action - mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action { - k := key{a, pkg} - act, ok := actions[k] - if !ok { - act = &action{a: a, pkg: pkg} - - // Add a dependency on each required analyzers. - for _, req := range a.Requires { - act.deps = append(act.deps, mkAction(req, pkg)) - } + var list []*checker.Action + var total time.Duration + // TODO(adonovan): use "for act := range graph.All() { ... }" in go1.23. + graph.All()(func(act *checker.Action) bool { + list = append(list, act) + total += act.Duration + return true + }) - // An analysis that consumes/produces facts - // must run on the package's dependencies too. - if len(a.FactTypes) > 0 { - paths := make([]string, 0, len(pkg.Imports)) - for path := range pkg.Imports { - paths = append(paths, path) - } - sort.Strings(paths) // for determinism - for _, path := range paths { - dep := mkAction(a, pkg.Imports[path]) - act.deps = append(act.deps, dep) - } + // Print actions accounting for 90% of the total. + sort.Slice(list, func(i, j int) bool { + return list[i].Duration > list[j].Duration + }) + var sum time.Duration + for _, act := range list { + fmt.Fprintf(os.Stderr, "%s\t%s\n", act.Duration, act) + sum += act.Duration + if sum >= total*9/10 { + break } - - actions[k] = act } - return act - } - - // Build nodes for initial packages. - var roots []*action - for _, a := range analyzers { - for _, pkg := range pkgs { - root := mkAction(a, pkg) - root.isroot = true - roots = append(roots, root) + if total > sum { + fmt.Fprintf(os.Stderr, "%s\tall others\n", total-sum) } } - // Execute the graph in parallel. - execAll(roots) - - return roots + return exitcode } -func applyFixes(roots []*action) error { - // visit all of the actions and accumulate the suggested edits. - paths := make(map[robustio.FileID]string) - editsByAction := make(map[robustio.FileID]map[*action][]diff.Edit) - visited := make(map[*action]bool) - var apply func(*action) error - var visitAll func(actions []*action) error - visitAll = func(actions []*action) error { - for _, act := range actions { - if !visited[act] { - visited[act] = true - if err := visitAll(act.deps); err != nil { - return err - } - if err := apply(act); err != nil { - return err - } - } - } - return nil +// load loads the initial packages. Returns only top-level loading +// errors. Does not consider errors in packages. +func load(patterns []string, allSyntax bool) ([]*packages.Package, error) { + mode := packages.LoadSyntax + if allSyntax { + mode = packages.LoadAllSyntax + } + mode |= packages.NeedModule + conf := packages.Config{ + Mode: mode, + Tests: IncludeTests, + } + initial, err := packages.Load(&conf, patterns...) + if err == nil && len(initial) == 0 { + err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " ")) } + return initial, err +} - apply = func(act *action) error { +// applyFixes applies suggested fixes associated with diagnostics +// reported by the specified actions. 
It verifies that edits do not +// conflict, even through file-system level aliases such as symbolic +// links, and then edits the files. +func applyFixes(actions []*checker.Action) error { + // Visit all of the actions and accumulate the suggested edits. + paths := make(map[robustio.FileID]string) + editsByAction := make(map[robustio.FileID]map[*checker.Action][]diff.Edit) + for _, act := range actions { editsForTokenFile := make(map[*token.File][]diff.Edit) - for _, diag := range act.diagnostics { + for _, diag := range act.Diagnostics { for _, sf := range diag.SuggestedFixes { for _, edit := range sf.TextEdits { // Validate the edit. // Any error here indicates a bug in the analyzer. start, end := edit.Pos, edit.End - file := act.pkg.Fset.File(start) + file := act.Package.Fset.File(start) if file == nil { return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)", - act.a.Name, start) + act.Analyzer.Name, edit.Pos) } if !end.IsValid() { end = start } if start > end { return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)", - act.a.Name, start, end) + act.Analyzer.Name, edit.Pos, edit.End) } if eof := token.Pos(file.Base() + file.Size()); end > eof { return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)", - act.a.Name, end, eof) + act.Analyzer.Name, edit.End, eof) } edit := diff.Edit{ Start: file.Offset(start), @@ -377,22 +320,17 @@ func applyFixes(roots []*action) error { } if _, hasId := paths[id]; !hasId { paths[id] = f.Name() - editsByAction[id] = make(map[*action][]diff.Edit) + editsByAction[id] = make(map[*checker.Action][]diff.Edit) } editsByAction[id][act] = edits } - return nil - } - - if err := visitAll(roots); err != nil { - return err } // Validate and group the edits to each actual file. editsByPath := make(map[string][]diff.Edit) for id, actToEdits := range editsByAction { path := paths[id] - actions := make([]*action, 0, len(actToEdits)) + actions := make([]*checker.Action, 0, len(actToEdits)) for act := range actToEdits { actions = append(actions, act) } @@ -401,7 +339,7 @@ func applyFixes(roots []*action) error { for _, act := range actions { edits := actToEdits[act] if _, invalid := validateEdits(edits); invalid > 0 { - name, x, y := act.a.Name, edits[invalid-1], edits[invalid] + name, x, y := act.Analyzer.Name, edits[invalid-1], edits[invalid] return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y}) } } @@ -410,7 +348,7 @@ func applyFixes(roots []*action) error { for j := range actions { for k := range actions[:j] { x, y := actions[j], actions[k] - if x.a.Name > y.a.Name { + if x.Analyzer.Name > y.Analyzer.Name { x, y = y, x } xedits, yedits := actToEdits[x], actToEdits[y] @@ -419,7 +357,7 @@ func applyFixes(roots []*action) error { // TODO: consider applying each action's consistent list of edits entirely, // and then using a three-way merge (such as GNU diff3) on the resulting // files to report more precisely the parts that actually conflict. - return diff3Conflict(path, x.a.Name, y.a.Name, xedits, yedits) + return diff3Conflict(path, x.Analyzer.Name, y.Analyzer.Name, xedits, yedits) } } } @@ -432,6 +370,7 @@ func applyFixes(roots []*action) error { } // Now we've got a set of valid edits for each file. Apply them. + // TODO(adonovan): don't abort the operation partway just because one file fails. for path, edits := range editsByPath { // TODO(adonovan): this should really work on the same // gulp from the file system that fed the analyzer (see #62292). 
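applyFixes above consumes analysis.SuggestedFix values attached to diagnostics. A toy analyzer sketch that emits such a fix, which a -fix run would then apply; the analyzer name and the specific check are invented for illustration:

package main

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/singlechecker"
)

var emptyelse = &analysis.Analyzer{
	Name: "emptyelse",
	Doc:  "flags empty else blocks and offers to delete them",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		for _, f := range pass.Files {
			ast.Inspect(f, func(n ast.Node) bool {
				ifs, ok := n.(*ast.IfStmt)
				if !ok {
					return true
				}
				blk, ok := ifs.Else.(*ast.BlockStmt)
				if !ok || len(blk.List) > 0 {
					return true
				}
				pass.Report(analysis.Diagnostic{
					Pos:     blk.Pos(),
					End:     blk.End(),
					Message: "empty else block",
					SuggestedFixes: []analysis.SuggestedFix{{
						Message: "Remove the empty else block",
						TextEdits: []analysis.TextEdit{{
							// Delete everything from the end of the if body
							// through the closing brace of the else block.
							Pos: ifs.Body.End(),
							End: blk.End(),
						}},
					}},
				})
				return true
			})
		}
		return nil, nil
	},
}

func main() { singlechecker.Main(emptyelse) }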
@@ -490,7 +429,7 @@ func validateEdits(edits []diff.Edit) ([]diff.Edit, int) { // diff3Conflict returns an error describing two conflicting sets of // edits on a file at path. func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error { - contents, err := os.ReadFile(path) + contents, err := ioutil.ReadFile(path) if err != nil { return err } @@ -509,117 +448,6 @@ func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edi xlabel, ylabel, path, xdiff, ydiff) } -// printDiagnostics prints the diagnostics for the root packages in either -// plain text or JSON format. JSON format also includes errors for any -// dependencies. -// -// It returns the exitcode: in plain mode, 0 for success, 1 for analysis -// errors, and 3 for diagnostics. We avoid 2 since the flag package uses -// it. JSON mode always succeeds at printing errors and diagnostics in a -// structured form to stdout. -func printDiagnostics(roots []*action) (exitcode int) { - // Print the output. - // - // Print diagnostics only for root packages, - // but errors for all packages. - printed := make(map[*action]bool) - var print func(*action) - var visitAll func(actions []*action) - visitAll = func(actions []*action) { - for _, act := range actions { - if !printed[act] { - printed[act] = true - visitAll(act.deps) - print(act) - } - } - } - - if analysisflags.JSON { - // JSON output - tree := make(analysisflags.JSONTree) - print = func(act *action) { - var diags []analysis.Diagnostic - if act.isroot { - diags = act.diagnostics - } - tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err) - } - visitAll(roots) - tree.Print() - } else { - // plain text output - - // De-duplicate diagnostics by position (not token.Pos) to - // avoid double-reporting in source files that belong to - // multiple packages, such as foo and foo.test. - type key struct { - pos token.Position - end token.Position - *analysis.Analyzer - message string - } - seen := make(map[key]bool) - - print = func(act *action) { - if act.err != nil { - fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err) - exitcode = 1 // analysis failed, at least partially - return - } - if act.isroot { - for _, diag := range act.diagnostics { - // We don't display a.Name/f.Category - // as most users don't care. - - posn := act.pkg.Fset.Position(diag.Pos) - end := act.pkg.Fset.Position(diag.End) - k := key{posn, end, act.a, diag.Message} - if seen[k] { - continue // duplicate - } - seen[k] = true - - analysisflags.PrintPlain(act.pkg.Fset, diag) - } - } - } - visitAll(roots) - - if exitcode == 0 && len(seen) > 0 { - exitcode = 3 // successfully produced diagnostics - } - } - - // Print timing info. - if dbg('t') { - if !dbg('p') { - log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism") - } - var all []*action - var total time.Duration - for act := range printed { - all = append(all, act) - total += act.duration - } - sort.Slice(all, func(i, j int) bool { - return all[i].duration > all[j].duration - }) - - // Print actions accounting for 90% of the total. - var sum time.Duration - for _, act := range all { - fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act) - sum += act.duration - if sum >= total*9/10 { - break - } - } - } - - return exitcode -} - // needFacts reports whether any analysis required by the specified set // needs facts. If so, we must load the entire program from source. 
func needFacts(analyzers []*analysis.Analyzer) bool { @@ -640,365 +468,4 @@ func needFacts(analyzers []*analysis.Analyzer) bool { return false } -// An action represents one unit of analysis work: the application of -// one analysis to one package. Actions form a DAG, both within a -// package (as different analyzers are applied, either in sequence or -// parallel), and across packages (as dependencies are analyzed). -type action struct { - once sync.Once - a *analysis.Analyzer - pkg *packages.Package - pass *analysis.Pass - isroot bool - deps []*action - objectFacts map[objectFactKey]analysis.Fact - packageFacts map[packageFactKey]analysis.Fact - result interface{} - diagnostics []analysis.Diagnostic - err error - duration time.Duration -} - -type objectFactKey struct { - obj types.Object - typ reflect.Type -} - -type packageFactKey struct { - pkg *types.Package - typ reflect.Type -} - -func (act *action) String() string { - return fmt.Sprintf("%s@%s", act.a, act.pkg) -} - -func execAll(actions []*action) { - sequential := dbg('p') - var wg sync.WaitGroup - for _, act := range actions { - wg.Add(1) - work := func(act *action) { - act.exec() - wg.Done() - } - if sequential { - work(act) - } else { - go work(act) - } - } - wg.Wait() -} - -func (act *action) exec() { act.once.Do(act.execOnce) } - -func (act *action) execOnce() { - // Analyze dependencies. - execAll(act.deps) - - // TODO(adonovan): uncomment this during profiling. - // It won't build pre-go1.11 but conditional compilation - // using build tags isn't warranted. - // - // ctx, task := trace.NewTask(context.Background(), "exec") - // trace.Log(ctx, "pass", act.String()) - // defer task.End() - - // Record time spent in this node but not its dependencies. - // In parallel mode, due to GC/scheduler contention, the - // time is 5x higher than in sequential mode, even with a - // semaphore limiting the number of threads here. - // So use -debug=tp. - if dbg('t') { - t0 := time.Now() - defer func() { act.duration = time.Since(t0) }() - } - - // Report an error if any dependency failed. - var failed []string - for _, dep := range act.deps { - if dep.err != nil { - failed = append(failed, dep.String()) - } - } - if failed != nil { - sort.Strings(failed) - act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) - return - } - - // Plumb the output values of the dependencies - // into the inputs of this action. Also facts. - inputs := make(map[*analysis.Analyzer]interface{}) - act.objectFacts = make(map[objectFactKey]analysis.Fact) - act.packageFacts = make(map[packageFactKey]analysis.Fact) - for _, dep := range act.deps { - if dep.pkg == act.pkg { - // Same package, different analysis (horizontal edge): - // in-memory outputs of prerequisite analyzers - // become inputs to this analysis pass. - inputs[dep.a] = dep.result - - } else if dep.a == act.a { // (always true) - // Same analysis, different package (vertical edge): - // serialized facts produced by prerequisite analysis - // become available to this analysis pass. - inheritFacts(act, dep) - } - } - - // Run the analysis. 
- pass := &analysis.Pass{ - Analyzer: act.a, - Fset: act.pkg.Fset, - Files: act.pkg.Syntax, - OtherFiles: act.pkg.OtherFiles, - IgnoredFiles: act.pkg.IgnoredFiles, - Pkg: act.pkg.Types, - TypesInfo: act.pkg.TypesInfo, - TypesSizes: act.pkg.TypesSizes, - TypeErrors: act.pkg.TypeErrors, - - ResultOf: inputs, - Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, - ImportObjectFact: act.importObjectFact, - ExportObjectFact: act.exportObjectFact, - ImportPackageFact: act.importPackageFact, - ExportPackageFact: act.exportPackageFact, - AllObjectFacts: act.allObjectFacts, - AllPackageFacts: act.allPackageFacts, - } - pass.ReadFile = analysisinternal.MakeReadFile(pass) - act.pass = pass - - var err error - if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors { - err = fmt.Errorf("analysis skipped due to errors in package") - } else { - act.result, err = pass.Analyzer.Run(pass) - if err == nil { - if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want { - err = fmt.Errorf( - "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", - pass.Pkg.Path(), pass.Analyzer, got, want) - } - } - } - if err == nil { // resolve diagnostic URLs - for i := range act.diagnostics { - if url, uerr := analysisflags.ResolveURL(act.a, act.diagnostics[i]); uerr == nil { - act.diagnostics[i].URL = url - } else { - err = uerr // keep the last error - } - } - } - act.err = err - - // disallow calls after Run - pass.ExportObjectFact = nil - pass.ExportPackageFact = nil -} - -// inheritFacts populates act.facts with -// those it obtains from its dependency, dep. -func inheritFacts(act, dep *action) { - serialize := dbg('s') - - for key, fact := range dep.objectFacts { - // Filter out facts related to objects - // that are irrelevant downstream - // (equivalently: not in the compiler export data). - if !exportedFrom(key.obj, dep.pkg.Types) { - if false { - log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact) - } - continue - } - - // Optionally serialize/deserialize fact - // to verify that it works across address spaces. - if serialize { - encodedFact, err := codeFact(fact) - if err != nil { - log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) - } - fact = encodedFact - } - - if false { - log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact) - } - act.objectFacts[key] = fact - } - - for key, fact := range dep.packageFacts { - // TODO: filter out facts that belong to - // packages not mentioned in the export data - // to prevent side channels. - - // Optionally serialize/deserialize fact - // to verify that it works across address spaces - // and is deterministic. - if serialize { - encodedFact, err := codeFact(fact) - if err != nil { - log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) - } - fact = encodedFact - } - - if false { - log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact) - } - act.packageFacts[key] = fact - } -} - -// codeFact encodes then decodes a fact, -// just to exercise that logic. -func codeFact(fact analysis.Fact) (analysis.Fact, error) { - // We encode facts one at a time. - // A real modular driver would emit all facts - // into one encoder to improve gob efficiency. - var buf bytes.Buffer - if err := gob.NewEncoder(&buf).Encode(fact); err != nil { - return nil, err - } - - // Encode it twice and assert that we get the same bits. 
- // This helps detect nondeterministic Gob encoding (e.g. of maps). - var buf2 bytes.Buffer - if err := gob.NewEncoder(&buf2).Encode(fact); err != nil { - return nil, err - } - if !bytes.Equal(buf.Bytes(), buf2.Bytes()) { - return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact) - } - - new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact) - if err := gob.NewDecoder(&buf).Decode(new); err != nil { - return nil, err - } - return new, nil -} - -// exportedFrom reports whether obj may be visible to a package that imports pkg. -// This includes not just the exported members of pkg, but also unexported -// constants, types, fields, and methods, perhaps belonging to other packages, -// that find there way into the API. -// This is an overapproximation of the more accurate approach used by -// gc export data, which walks the type graph, but it's much simpler. -// -// TODO(adonovan): do more accurate filtering by walking the type graph. -func exportedFrom(obj types.Object, pkg *types.Package) bool { - switch obj := obj.(type) { - case *types.Func: - return obj.Exported() && obj.Pkg() == pkg || - obj.Type().(*types.Signature).Recv() != nil - case *types.Var: - if obj.IsField() { - return true - } - // we can't filter more aggressively than this because we need - // to consider function parameters exported, but have no way - // of telling apart function parameters from local variables. - return obj.Pkg() == pkg - case *types.TypeName, *types.Const: - return true - } - return false // Nil, Builtin, Label, or PkgName -} - -// importObjectFact implements Pass.ImportObjectFact. -// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, -// importObjectFact copies the fact value to *ptr. -func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { - if obj == nil { - panic("nil object") - } - key := objectFactKey{obj, factType(ptr)} - if v, ok := act.objectFacts[key]; ok { - reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) - return true - } - return false -} - -// exportObjectFact implements Pass.ExportObjectFact. -func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { - if act.pass.ExportObjectFact == nil { - log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact) - } - - if obj.Pkg() != act.pkg.Types { - log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", - act.a, act.pkg, obj, fact) - } - - key := objectFactKey{obj, factType(fact)} - act.objectFacts[key] = fact // clobber any existing entry - if dbg('f') { - objstr := types.ObjectString(obj, (*types.Package).Name) - fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n", - act.pkg.Fset.Position(obj.Pos()), objstr, fact) - } -} - -// allObjectFacts implements Pass.AllObjectFacts. -func (act *action) allObjectFacts() []analysis.ObjectFact { - facts := make([]analysis.ObjectFact, 0, len(act.objectFacts)) - for k := range act.objectFacts { - facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]}) - } - return facts -} - -// importPackageFact implements Pass.ImportPackageFact. -// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, -// fact copies the fact value to *ptr. 
-func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool { - if pkg == nil { - panic("nil package") - } - key := packageFactKey{pkg, factType(ptr)} - if v, ok := act.packageFacts[key]; ok { - reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) - return true - } - return false -} - -// exportPackageFact implements Pass.ExportPackageFact. -func (act *action) exportPackageFact(fact analysis.Fact) { - if act.pass.ExportPackageFact == nil { - log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact) - } - - key := packageFactKey{act.pass.Pkg, factType(fact)} - act.packageFacts[key] = fact // clobber any existing entry - if dbg('f') { - fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n", - act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) - } -} - -func factType(fact analysis.Fact) reflect.Type { - t := reflect.TypeOf(fact) - if t.Kind() != reflect.Ptr { - log.Fatalf("invalid Fact type: got %T, want pointer", fact) - } - return t -} - -// allPackageFacts implements Pass.AllPackageFacts. -func (act *action) allPackageFacts() []analysis.PackageFact { - facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) - for k := range act.packageFacts { - facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]}) - } - return facts -} - func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 } diff --git a/vendor/golang.org/x/tools/go/analysis/internal/internal.go b/vendor/golang.org/x/tools/go/analysis/internal/internal.go new file mode 100644 index 000000000..e7c8247fd --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/internal.go @@ -0,0 +1,12 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import "golang.org/x/tools/go/analysis" + +// This function is set by the checker package to provide +// backdoor access to the private Pass field +// of the checker.Action type, for use by analysistest. +var Pass func(interface{}) *analysis.Pass diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go index d77fb203d..1a9b3094e 100644 --- a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -51,7 +51,6 @@ import ( "golang.org/x/tools/go/analysis/internal/analysisflags" "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/facts" - "golang.org/x/tools/internal/versions" ) // A Config describes a compilation unit to be analyzed. 
@@ -66,6 +65,8 @@ type Config struct { GoFiles []string NonGoFiles []string IgnoredFiles []string + ModulePath string // module path + ModuleVersion string // module version ImportMap map[string]string // maps import path to package path PackageFile map[string]string // maps package path to file of type information Standard map[string]bool // package belongs to standard library @@ -143,7 +144,7 @@ func Run(configFile string, analyzers []*analysis.Analyzer) { for _, res := range results { tree.Add(fset, cfg.ID, res.a.Name, res.diagnostics, res.err) } - tree.Print() + tree.Print(os.Stdout) } else { // plain text exit := 0 @@ -155,7 +156,7 @@ func Run(configFile string, analyzers []*analysis.Analyzer) { } for _, res := range results { for _, diag := range res.diagnostics { - analysisflags.PrintPlain(fset, diag) + analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag) exit = 1 } } @@ -255,15 +256,15 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re GoVersion: cfg.GoVersion, } info := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), } - versions.InitFileVersions(info) pkg, err := tc.Check(cfg.ImportPath, fset, files, info) if err != nil { @@ -359,6 +360,12 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re factFilter[reflect.TypeOf(f)] = true } + module := &analysis.Module{ + Path: cfg.ModulePath, + Version: cfg.ModuleVersion, + GoVersion: cfg.GoVersion, + } + pass := &analysis.Pass{ Analyzer: a, Fset: fset, @@ -377,6 +384,7 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re ImportPackageFact: facts.ImportPackageFact, ExportPackageFact: facts.ExportPackageFact, AllPackageFacts: func() []analysis.PackageFact { return facts.AllPackageFacts(factFilter) }, + Module: module, } pass.ReadFile = analysisinternal.MakeReadFile(pass) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 1fc1de0bd..cfda89343 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -36,6 +36,7 @@ package inspector import ( "go/ast" + _ "unsafe" ) // An Inspector provides methods for inspecting @@ -44,6 +45,9 @@ type Inspector struct { events []event } +//go:linkname events +func events(in *Inspector) []event { return in.events } + // New returns an Inspector for the specified syntax trees. func New(files []*ast.File) *Inspector { return &Inspector{traverse(files)} @@ -52,9 +56,10 @@ func New(files []*ast.File) *Inspector { // An event represents a push or a pop // of an ast.Node during a traversal. 
type event struct { - node ast.Node - typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events - index int // index of corresponding push or pop event + node ast.Node + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int32 // index of corresponding push or pop event + parent int32 // index of parent's push node (defined for push nodes only) } // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). @@ -73,8 +78,17 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // check, Preorder is almost twice as fast as Nodes. The two // features seem to contribute similar slowdowns (~1.4x each). + // This function is equivalent to the PreorderSeq call below, + // but to avoid the additional dynamic call (which adds 13-35% + // to the benchmarks), we expand it out. + // + // in.PreorderSeq(types...)(func(n ast.Node) bool { + // f(n) + // return true + // }) + mask := maskOf(types) - for i := 0; i < len(in.events); { + for i := int32(0); i < int32(len(in.events)); { ev := in.events[i] if ev.index > i { // push @@ -104,7 +118,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // matches an element of the types slice. func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { mask := maskOf(types) - for i := 0; i < len(in.events); { + for i := int32(0); i < int32(len(in.events)); { ev := in.events[i] if ev.index > i { // push @@ -138,7 +152,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { mask := maskOf(types) var stack []ast.Node - for i := 0; i < len(in.events); { + for i := int32(0); i < int32(len(in.events)); { ev := in.events[i] if ev.index > i { // push @@ -171,7 +185,9 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s // traverse builds the table of events representing a traversal. func traverse(files []*ast.File) []event { // Preallocate approximate number of events - // based on source file extent. + // based on source file extent of the declarations. + // (We use End-Pos not FileStart-FileEnd to neglect + // the effect of long doc comments.) // This makes traverse faster by 4x (!). var extent int for _, f := range files { @@ -185,18 +201,24 @@ func traverse(files []*ast.File) []event { events := make([]event, 0, capacity) var stack []event - stack = append(stack, event{}) // include an extra event so file nodes have a parent + stack = append(stack, event{index: -1}) // include an extra event so file nodes have a parent for _, f := range files { ast.Inspect(f, func(n ast.Node) bool { if n != nil { // push ev := event{ - node: n, - typ: 0, // temporarily used to accumulate type bits of subtree - index: len(events), // push event temporarily holds own index + node: n, + typ: 0, // temporarily used to accumulate type bits of subtree + index: int32(len(events)), // push event temporarily holds own index + parent: stack[len(stack)-1].index, } stack = append(stack, ev) events = append(events, ev) + + // 2B nodes ought to be enough for anyone! 
+ if int32(len(events)) < 0 { + panic("event index exceeded int32") + } } else { // pop top := len(stack) - 1 @@ -205,9 +227,9 @@ func traverse(files []*ast.File) []event { push := ev.index parent := top - 1 - events[push].typ = typ // set type of push - stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. - events[push].index = len(events) // make push refer to pop + events[push].typ = typ // set type of push + stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. + events[push].index = int32(len(events)) // make push refer to pop stack = stack[:top] events = append(events, ev) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go new file mode 100644 index 000000000..c576dc70a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package inspector + +import ( + "go/ast" + "iter" +) + +// PreorderSeq returns an iterator that visits all the +// nodes of the files supplied to New in depth-first order. +// It visits each node n before n's children. +// The complete traversal sequence is determined by ast.Inspect. +// +// The types argument, if non-empty, enables type-based +// filtering of events: only nodes whose type matches an +// element of the types slice are included in the sequence. +func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { + + // This implementation is identical to Preorder, + // except that it supports breaking out of the loop. + + return func(yield func(ast.Node) bool) { + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// All[N] returns an iterator over all the nodes of type N. +// N must be a pointer-to-struct type that implements ast.Node. +// +// Example: +// +// for call := range All[*ast.CallExpr](in) { ... } +func All[N interface { + *S + ast.Node +}, S any](in *Inspector) iter.Seq[N] { + + // To avoid additional dynamic call overheads, + // we duplicate rather than call the logic of PreorderSeq. + + mask := typeOf((N)(nil)) + return func(yield func(N) bool) { + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node.(N)) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. 
+ i = pop + 1 + continue + } + } + i++ + } + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 2a872f89d..40b1bfd7e 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,6 +12,8 @@ package inspector import ( "go/ast" "math" + + _ "unsafe" ) const ( @@ -215,6 +217,7 @@ func typeOf(n ast.Node) uint64 { return 0 } +//go:linkname maskOf func maskOf(nodes []ast.Node) uint64 { if nodes == nil { return math.MaxUint64 // match all node types diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 137cc8df1..65fe2628e 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -2,22 +2,64 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. -// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) -package gcexportdata // import "golang.org/x/tools/go/gcexportdata" +// Package gcexportdata provides functions for reading and writing +// export data, which is a serialized description of the API of a Go +// package including the names, kinds, types, and locations of all +// exported declarations. +// +// The standard Go compiler (cmd/compile) writes an export data file +// for each package it compiles, which it later reads when compiling +// packages that import the earlier one. The compiler must thus +// contain logic to both write and read export data. +// (See the "Export" section in the cmd/compile/README file.) +// +// The [Read] function in this package can read files produced by the +// compiler, producing [go/types] data structures. As a matter of +// policy, Read supports export data files produced by only the last +// two Go releases plus tip; see https://go.dev/issue/68898. The +// export data files produced by the compiler contain additional +// details related to generics, inlining, and other optimizations that +// cannot be decoded by the [Read] function. +// +// In files written by the compiler, the export data is not at the +// start of the file. Before calling Read, use [NewReader] to locate +// the desired portion of the file. +// +// The [Write] function in this package encodes the exported API of a +// Go package ([types.Package]) as a file. Such files can be later +// decoded by Read, but cannot be consumed by the compiler. 
+// +// # Future changes +// +// Although Read supports the formats written by both Write and the +// compiler, the two are quite different, and there is an open +// proposal (https://go.dev/issue/69491) to separate these APIs. +// +// Under that proposal, this package would ultimately provide only the +// Read operation for compiler export data, which must be defined in +// this module (golang.org/x/tools), not in the standard library, to +// avoid version skew for developer tools that need to read compiler +// export data both before and after a Go release, such as from Go +// 1.23 to Go 1.24. Because this package lives in the tools module, +// clients can update their version of the module some time before the +// Go 1.24 release and rebuild and redeploy their tools, which will +// then be able to consume both Go 1.23 and Go 1.24 export data files, +// so they will work before and after the Go update. (See discussion +// at https://go.dev/issue/15651.) +// +// The operations to import and export [go/types] data structures +// would be defined in the go/types package as Import and Export. +// [Write] would (eventually) delegate to Export, +// and [Read], when it detects a file produced by Export, +// would delegate to Import. +// +// # Deprecations +// +// The [NewImporter] and [Find] functions are deprecated and should +// not be used in new code. The [WriteBundle] and [ReadBundle] +// functions are experimental, and there is an open proposal to +// deprecate them (https://go.dev/issue/69573). +package gcexportdata import ( "bufio" @@ -64,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) { // additional trailing data beyond the end of the export data. func NewReader(r io.Reader) (io.Reader, error) { buf := bufio.NewReader(r) - _, size, err := gcimporter.FindExportData(buf) + size, err := gcimporter.FindExportData(buf) if err != nil { return nil, err } - if size >= 0 { - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil - } else { - // We were given an object file. As such, we don't know how large - // the export data is and must return the entire file. - return buf, nil - } + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil } // readAll works the same way as io.ReadAll, but avoids allocations and copies @@ -100,6 +136,11 @@ func readAll(r io.Reader) ([]byte, error) { // Read reads export data from in, decodes it, and returns type // information for the package. // +// Read is capable of reading export data produced by [Write] at the +// same source code version, or by the last two Go releases (plus tip) +// of the standard Go compiler. Reading files from older compilers may +// produce an error. +// // The package path (effectively its linker symbol prefix) is // specified by path, since unlike the package name, this information // may not be recorded in the export data. @@ -128,14 +169,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. 
if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary, produced by cmd/compile till go1.10 return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed, produced by cmd/compile till go1.19, + // and also by [Write]. + // + // If proposal #69491 is accepted, go/types + // serialization will be implemented by + // types.Export, to which Write would eventually + // delegate (explicitly dropping any pretence at + // inter-version Write-Read compatibility). + // This [Read] function would delegate to types.Import + // when it detects that the file was produced by Export. _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified, produced by cmd/compile since go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go deleted file mode 100644 index 333676b7c..000000000 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packagesdriver fetches type sizes for go/packages and go/analysis. -package packagesdriver - -import ( - "context" - "fmt" - "strings" - - "golang.org/x/tools/internal/gocommand" -) - -func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { - inv.Verb = "list" - inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} - stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) - var goarch, compiler string - if rawErr != nil { - rawErrMsg := rawErr.Error() - if strings.Contains(rawErrMsg, "cannot find main module") || - strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. - // All bets are off. Get GOARCH and guess compiler is gc. - // TODO(matloob): Is this a problem in practice? - inv.Verb = "env" - inv.Args = []string{"GOARCH"} - envout, enverr := gocmdRunner.Run(ctx, inv) - if enverr != nil { - return "", "", enverr - } - goarch = strings.TrimSpace(envout.String()) - compiler = "gc" - } else if friendlyErr != nil { - return "", "", friendlyErr - } else { - // This should be unreachable, but be defensive - // in case RunRaw's error results are inconsistent. - return "", "", rawErr - } - } else { - fields := strings.Fields(stdout.String()) - if len(fields) < 2 { - return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", - stdout.String(), stderr.String()) - } - goarch = fields[0] - compiler = fields[1] - } - return compiler, goarch, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index a8d7b06ac..f1931d10e 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -64,7 +64,7 @@ graph using the Imports fields. The Load function can be configured by passing a pointer to a Config as the first argument. A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. 
+causes Load to run in [LoadFiles] mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail @@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to [Load], so that it can interpret them +uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. # The driver protocol -[Load] may be used to load Go packages even in Go projects that use +Load may be used to load Go packages even in Go projects that use alternative build systems, by installing an appropriate "driver" program for the build system and specifying its location in the GOPACKAGESDRIVER environment variable. @@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information is written to the driver's standard input. The driver must write a JSON-encoded [DriverResponse] message to its standard output. (This message differs from the JSON schema produced by 'go list'.) + +The value of the PWD environment variable seen by the driver process +is the preferred name of its working directory. (The working directory +may have other aliases due to symbolic links; see the comment on the +Dir field of [exec.Cmd] for related information.) +When the driver process emits in its response the name of a file +that is a descendant of this directory, it must use an absolute path +that has the value of PWD as a prefix, to ensure that the returned +filenames satisfy the original query. */ package packages // import "golang.org/x/tools/go/packages" @@ -198,14 +207,6 @@ Instead, ssadump no longer requests the runtime package, but seeks it among the dependencies of the user-specified packages, and emits an error if it is not found. -Overlays: The Overlay field in the Config allows providing alternate contents -for Go source files, by providing a mapping from file path to contents. -go/packages will pull in new imports added in overlay files when go/packages -is run in LoadImports mode or greater. -Overlay support for the go list driver isn't complete yet: if the file doesn't -exist on disk, it will only be recognized in an overlay if it is a non-test file -and the package would be reported even without the overlay. - Questions & Tasks - Add GOARCH/GOOS? diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 4335c1eb1..91bd62e83 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -13,6 +13,7 @@ import ( "fmt" "os" "os/exec" + "slices" "strings" ) @@ -34,8 +35,8 @@ type DriverRequest struct { // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` - // Overlay maps file paths (relative to the driver's working directory) to the byte contents - // of overlay files. + // Overlay maps file paths (relative to the driver's working directory) + // to the contents of overlay files (see Config.Overlay). Overlay map[string][]byte `json:"overlay"` } @@ -79,10 +80,10 @@ type DriverResponse struct { // driver is the type for functions that query the build system for the // packages named by the patterns. 
-type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) +type driver func(cfg *Config, patterns []string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." +// the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its // value, otherwise it searches for a binary named gopackagesdriver on the PATH. func findExternalDriver(cfg *Config) driver { @@ -103,7 +104,7 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*DriverResponse, error) { + return func(cfg *Config, patterns []string) (*DriverResponse, error) { req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, @@ -117,9 +118,21 @@ func findExternalDriver(cfg *Config) driver { buf := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd := exec.CommandContext(cfg.Context, tool, patterns...) cmd.Dir = cfg.Dir - cmd.Env = cfg.Env + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the + // go command when dealing with modules. + // + // os.Getwd stdlib has a special feature where if the + // cwd and the PWD are the same node then it trusts + // the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go + // command. + // + // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) + cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 22305d9c9..0458b4f9c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -21,7 +21,6 @@ import ( "sync" "unicode" - "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" ) @@ -81,6 +80,12 @@ type golistState struct { cfg *Config ctx context.Context + runner *gocommand.Runner + + // overlay is the JSON file that encodes the Config.Overlay + // mapping, used by 'go list -overlay=...'. + overlay string + envOnce sync.Once goEnvError error goEnv map[string]string @@ -128,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { +// +// overlay is the JSON file that encodes the cfg.Overlay +// mapping, used by 'go list -overlay=...' +func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -143,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error cfg: cfg, ctx: ctx, vendorDirs: map[string]bool{}, + overlay: overlay, + runner: runner, } // Fill in response.Sizes asynchronously if necessary. 
- if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { errCh := make(chan error) go func() { - compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -312,6 +322,7 @@ type jsonPackage struct { ImportPath string Dir string Name string + Target string Export string GoFiles []string CompiledGoFiles []string @@ -495,13 +506,15 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse pkg := &Package{ Name: p.Name, ID: p.ImportPath, + Dir: p.Dir, + Target: p.Target, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), EmbedFiles: absJoin(p.Dir, p.EmbedFiles), EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), - forTest: p.ForTest, + ForTest: p.ForTest, depsErrors: p.DepsErrors, Module: p.Module, } @@ -682,7 +695,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { // getGoVersion returns the effective minor version of the go command. func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner) }) return state.goVersion, state.goVersionError } @@ -752,7 +765,7 @@ func jsonFlag(cfg *Config, goVersion int) string { } } addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", "SwigFiles", "SwigCXXFiles", "SysoFiles") @@ -760,7 +773,7 @@ func jsonFlag(cfg *Config, goVersion int) string { addFields("TestGoFiles", "XTestGoFiles") } } - if cfg.Mode&NeedTypes != 0 { + if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, // even when -compiled isn't passed in. // TODO(#52435): Should we make the test ask for -compiled, or automatically @@ -785,7 +798,7 @@ func jsonFlag(cfg *Config, goVersion int) string { // Request Dir in the unlikely case Export is not absolute. addFields("Dir", "Export") } - if cfg.Mode&needInternalForTest != 0 { + if cfg.Mode&NeedForTest != 0 { addFields("ForTest") } if cfg.Mode&needInternalDepsErrors != 0 { @@ -800,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string { if cfg.Mode&NeedEmbedPatterns != 0 { addFields("EmbedPatterns") } + if cfg.Mode&NeedTarget != 0 { + addFields("Target") + } return "-json=" + strings.Join(fields, ",") } @@ -841,6 +857,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, + Overlay: state.overlay, } } @@ -849,33 +866,10 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, cfg := state.cfg inv := state.cfgInvocation() - - // For Go versions 1.16 and above, `go list` accepts overlays directly via - // the -overlay flag. Set it, if it's available. 
- // - // The check for "list" is not necessarily required, but we should avoid - // getting the go version if possible. - if verb == "list" { - goVersion, err := state.getGoVersion() - if err != nil { - return nil, err - } - if goVersion >= 16 { - filename, cleanup, err := state.writeOverlays() - if err != nil { - return nil, err - } - defer cleanup() - inv.Overlay = filename - } - } inv.Verb = verb inv.Args = args - gocmdRunner := cfg.gocmdRunner - if gocmdRunner == nil { - gocmdRunner = &gocommand.Runner{} - } - stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + + stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv) if err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { @@ -899,6 +893,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return nil, friendlyErr } + // Return an error if 'go list' failed due to missing tools in + // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606). + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) { + return nil, friendlyErr + } + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field // and should be suppressed by go list -e. // @@ -1015,67 +1015,6 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return stdout, nil } -// OverlayJSON is the format overlay files are expected to be in. -// The Replace map maps from overlaid paths to replacement paths: -// the Go command will forward all reads trying to open -// each overlaid path to its replacement path, or consider the overlaid -// path not to exist if the replacement path is empty. -// -// From golang/go#39958. -type OverlayJSON struct { - Replace map[string]string `json:"replace,omitempty"` -} - -// writeOverlays writes out files for go list's -overlay flag, as described -// above. -func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { - // Do nothing if there are no overlays in the config. - if len(state.cfg.Overlay) == 0 { - return "", func() {}, nil - } - dir, err := os.MkdirTemp("", "gopackages-*") - if err != nil { - return "", nil, err - } - // The caller must clean up this directory, unless this function returns an - // error. - cleanup = func() { - os.RemoveAll(dir) - } - defer func() { - if err != nil { - cleanup() - } - }() - overlays := map[string]string{} - for k, v := range state.cfg.Overlay { - // Create a unique filename for the overlaid files, to avoid - // creating nested directories. - noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) - if err != nil { - return "", func() {}, err - } - if _, err := f.Write(v); err != nil { - return "", func() {}, err - } - if err := f.Close(); err != nil { - return "", func() {}, err - } - overlays[k] = f.Name() - } - b, err := json.Marshal(OverlayJSON{Replace: overlays}) - if err != nil { - return "", func() {}, err - } - // Write out the overlay file that contains the filepath mappings. 
- filename = filepath.Join(dir, "overlay.json") - if err := os.WriteFile(filename, b, 0665); err != nil { - return "", func() {}, err - } - return filename, cleanup, nil -} - func containsGoFile(s []string) bool { for _, f := range s { if strings.HasSuffix(f, ".go") { @@ -1104,3 +1043,44 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } + +// getSizesForArgs queries 'go list' for the appropriate +// Compiler and GOARCH arguments to pass to [types.SizesFor]. +func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return "", "", enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. + return "", "", rawErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return compiler, goarch, nil +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5c080d21b..69eec9f44 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -9,49 +9,48 @@ import ( "strings" ) -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, +var modes = [...]struct { + mode LoadMode + name string +}{ + {NeedName, "NeedName"}, + {NeedFiles, "NeedFiles"}, + {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, + {NeedImports, "NeedImports"}, + {NeedDeps, "NeedDeps"}, + {NeedExportFile, "NeedExportFile"}, + {NeedTypes, "NeedTypes"}, + {NeedSyntax, "NeedSyntax"}, + {NeedTypesInfo, "NeedTypesInfo"}, + {NeedTypesSizes, "NeedTypesSizes"}, + {NeedForTest, "NeedForTest"}, + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, + {NeedTarget, "NeedTarget"}, } -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { +func (mode LoadMode) String() string { + if mode == 0 { return "LoadMode(0)" } var out []string - for i, x := range allModes { - if x > m { - break + // named bits + for 
_, item := range modes { + if (mode & item.mode) != 0 { + mode ^= item.mode + out = append(out, item.name) } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x + } + // unnamed residue + if mode != 0 { + if out == nil { + return fmt.Sprintf("LoadMode(%#x)", int(mode)) } + out = append(out, fmt.Sprintf("%#x", int(mode))) } - if m != 0 { - out = append(out, "Unknown") + if len(out) == 1 { + return out[0] } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) + return "(" + strings.Join(out, "|") + ")" } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 3ea1b3fa4..0147d9080 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,13 +16,13 @@ import ( "go/scanner" "go/token" "go/types" - "io" "log" "os" "path/filepath" "runtime" "strings" "sync" + "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -31,23 +31,45 @@ import ( "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. // The bits below can be combined to specify which fields should be // filled in the result packages. +// // The zero value is a special case, equivalent to combining // the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// // ID and Errors (if present) will always be filled. -// Load may return more information than requested. +// [Load] may return more information than requested. +// +// The Mode flag is a union of several bits named NeedName, +// NeedFiles, and so on, each of which determines whether +// a given field of Package (Name, Files, etc) should be +// populated. +// +// For convenience, we provide named constants for the most +// common combinations of Need flags: +// +// [LoadFiles] lists of files in each package +// [LoadImports] ... plus imports +// [LoadTypes] ... plus type information +// [LoadSyntax] ... plus type-annotated syntax +// [LoadAllSyntax] ... for all dependencies +// +// Unfortunately there are a number of open bugs related to +// interactions among the LoadMode bits: +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 type LoadMode int const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota - // NeedFiles adds GoFiles and OtherFiles. + // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -66,10 +88,10 @@ const ( // NeedTypes adds Types, Fset, and IllTyped. NeedTypes - // NeedSyntax adds Syntax. + // NeedSyntax adds Syntax and Fset. NeedSyntax - // NeedTypesInfo adds TypesInfo. + // NeedTypesInfo adds TypesInfo and Fset. NeedTypesInfo // NeedTypesSizes adds TypesSizes. @@ -78,9 +100,10 @@ const ( // needInternalDepsErrors adds the internal deps errors field for use by gopls. needInternalDepsErrors - // needInternalForTest adds the internal forTest field. + // NeedForTest adds ForTest. + // // Tests must also be set on the context for this field to be populated. - needInternalForTest + NeedForTest // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. @@ -94,27 +117,27 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. 
NeedEmbedPatterns + + // NeedTarget adds Target. + NeedTarget + + // Be sure to update loadmode_string.go when adding new items! ) const ( - // Deprecated: LoadFiles exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadFiles loads the name and file names for the initial packages. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles - // Deprecated: LoadImports exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadImports loads the name, file names, and import mapping for the initial packages. LoadImports = LoadFiles | NeedImports - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadTypes loads exported type information for the initial packages. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadSyntax loads typed syntax for the initial packages. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. @@ -123,7 +146,8 @@ const ( // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. -// Calls to Load do not modify this struct. +// +// Calls to [Load] do not modify this struct. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode @@ -154,19 +178,10 @@ type Config struct { // Env []string - // gocmdRunner guards go command calls from concurrency errors. - gocmdRunner *gocommand.Runner - // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -199,19 +214,46 @@ type Config struct { // setting Tests may have no effect. Tests bool - // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the - // alternative file contents provided by the map. + // Overlay is a mapping from absolute file paths to file contents. // - // Overlays provide incomplete support for when a given file doesn't - // already exist on disk. See the package doc above for more details. + // For each map entry, [Load] uses the alternative file + // contents provided by the overlay mapping instead of reading + // from the file system. This mechanism can be used to enable + // editor-integrated tools to correctly analyze the contents + // of modified but unsaved buffers, for example. 
+ // + // The overlay mapping is passed to the build system's driver + // (see "The driver protocol") so that it too can report + // consistent package metadata about unsaved files. However, + // drivers may vary in their level of support for overlays. Overlay map[string][]byte + + // -- Hidden configuration fields only for use in x/tools -- + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string } // Load loads and returns the Go packages named by the given patterns. // -// Config specifies loading options; -// nil behaves the same as an empty Config. +// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. +// +// The [Config.Mode] field is a set of bits that determine what kinds +// of information should be computed and returned. Modes that require +// more information tend to be slower. See [LoadMode] for details +// and important caveats. Its zero value is equivalent to +// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. +// +// Each call to Load returns a new set of [Package] instances. +// The Packages and their Imports form a directed acyclic graph. +// +// If the [NeedTypes] mode flag was set, each call to Load uses a new +// [types.Importer], so [types.Object] and [types.Type] values from +// different calls to Load must not be mixed as they will have +// inconsistent notions of type identity. // // If any of the patterns was invalid as defined by the // underlying build system, Load returns an error. @@ -220,7 +262,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is +// proceeding with further analysis. The [PrintErrors] function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -283,10 +325,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro } else if !response.NotHandled { return response, true, nil } - // (fall through) + // not handled: fall through } - response, err := callDriverOnChunks(goListDriver, cfg, chunks) + // go list fallback + + // Write overlays once, as there are many calls + // to 'go list' (one per chunk plus others too). + overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + if err != nil { + return nil, false, err + } + defer cleanupOverlay() + + var runner gocommand.Runner // (shared across many 'go list' calls) + driver := func(cfg *Config, patterns []string) (*DriverResponse, error) { + return goListDriver(cfg, &runner, overlayFile, patterns) + } + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } @@ -324,16 +380,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { if len(chunks) == 0 { - return driver(cfg) + return driver(cfg, nil) } responses := make([]*DriverResponse, len(chunks)) errNotHandled := errors.New("driver returned NotHandled") var g errgroup.Group for i, chunk := range chunks { - i := i - chunk := chunk g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk...) 
+ responses[i], err = driver(cfg, chunk) if responses[i] != nil && responses[i].NotHandled { err = errNotHandled } @@ -365,6 +419,9 @@ func mergeResponses(responses ...*DriverResponse) *DriverResponse { } // A Package describes a loaded Go package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. type Package struct { // ID is a unique identifier for a package, // in a syntax provided by the underlying build system. @@ -380,6 +437,12 @@ type Package struct { // PkgPath is the package path as used by the go/types package. PkgPath string + // Dir is the directory associated with the package, if it exists. + // + // For packages listed by the go command, this is the directory containing + // the package files. + Dir string + // Errors contains any errors encountered querying the metadata // of the package, or while parsing or type-checking its files. Errors []Error @@ -419,10 +482,21 @@ type Package struct { // information for the package as provided by the build system. ExportFile string + // Target is the absolute install path of the .a file, for libraries, + // and of the executable file, for binaries. + Target string + // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. Imports map[string]*Package + // Module is the module information for the package if it exists. + // + // Note: it may be missing for std and cmd; see Go issue #65816. + Module *Module + + // -- The following fields are not part of the driver JSON schema. -- + // Types provides type information for the package. // The NeedTypes LoadMode bit sets this field for packages matching the // patterns; type information for dependencies may be missing or incomplete, @@ -431,15 +505,15 @@ type Package struct { // Each call to [Load] returns a consistent set of type // symbols, as defined by the comment at [types.Identical]. // Avoid mixing type information from two or more calls to [Load]. - Types *types.Package + Types *types.Package `json:"-"` // Fset provides position information for Types, TypesInfo, and Syntax. // It is set only when Types is set. - Fset *token.FileSet + Fset *token.FileSet `json:"-"` // IllTyped indicates whether the package or any dependency contains errors. // It is set only when Types is set. - IllTyped bool + IllTyped bool `json:"-"` // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. // @@ -449,26 +523,28 @@ type Package struct { // // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. - Syntax []*ast.File + Syntax []*ast.File `json:"-"` // TypesInfo provides type information about the package's syntax trees. // It is set only when Syntax is set. - TypesInfo *types.Info + TypesInfo *types.Info `json:"-"` // TypesSizes provides the effective size function for types in TypesInfo. - TypesSizes types.Sizes + TypesSizes types.Sizes `json:"-"` + + // -- internal -- - // forTest is the package under test, if any. - forTest string + // ForTest is the package under test, if any. + ForTest string // depsErrors is the DepsErrors field from the go list response, if any. depsErrors []*packagesinternal.PackageError - - // module is the module information for the package if it exists. - Module *Module } // Module provides module information for a package. +// +// It also defines part of the JSON schema of [DriverResponse]. 
+// See the package documentation for an overview. type Module struct { Path string // module path Version string // module version @@ -488,9 +564,6 @@ type ModuleError struct { } func init() { - packagesinternal.GetForTest = func(p interface{}) string { - return p.(*Package).forTest - } packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } @@ -502,7 +575,6 @@ func init() { } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) - packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. @@ -601,6 +673,7 @@ func (p *Package) UnmarshalJSON(b []byte) error { OtherFiles: flat.OtherFiles, EmbedFiles: flat.EmbedFiles, EmbedPatterns: flat.EmbedPatterns, + IgnoredFiles: flat.IgnoredFiles, ExportFile: flat.ExportFile, } if len(flat.Imports) > 0 { @@ -617,18 +690,19 @@ func (p *Package) String() string { return p.ID } // loaderPackage augments Package with state used during the loading phase type loaderPackage struct { *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH + importErrors map[string]error // maps each bad import to its error + preds []*loaderPackage // packages that import this one + unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { - pkgs map[string]*loaderPackage + pkgs map[string]*loaderPackage // keyed by Package.ID Config sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue @@ -674,9 +748,6 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } if ld.Context == nil { ld.Context = context.Background() } @@ -690,7 +761,7 @@ func newLoader(cfg *Config) *loader { ld.requestedMode = ld.Mode ld.Mode = impliedLoadMode(ld.Mode) - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -699,6 +770,7 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + // We implicitly promise to keep doing ast.Object resolution. :( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } @@ -730,7 +802,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" // This package needs type information if the caller requested types and the package is // either a root, or it's a non-root and the user requested dependencies ... 
- needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) // This package needs source if the call requested source (or types info, which implies source) // and the package is either a root, or it's a non-root and the user requested dependencies... needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || @@ -755,9 +827,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } } - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - + // Materialize the import graph if it is needed (NeedImports), + // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}). + var leaves []*loaderPackage // packages with no unfinished successors + if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { const ( white = 0 // new grey = 1 // in progress @@ -776,63 +849,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // dependency on a package that does. These are the only packages // for which we load source code. var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: + var visit func(from, lpkg *loaderPackage) bool + visit = func(from, lpkg *loaderPackage) bool { + if lpkg.color == grey { panic("internal error: grey node") } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) + if lpkg.color == white { + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(lpkg, imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) + + // -- postorder -- + + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true } - lpkg.importErrors[importPath] = importErr - continue } - if visit(imp) { - lpkg.needsrc = true + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed.
+ if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes } - lpkg.Imports[importPath] = imp.Package - } - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true + // Add packages with no imports directly to the queue of leaves. + if len(lpkg.Imports) == 0 { + leaves = append(leaves, lpkg) } + + stack = stack[:len(stack)-1] // pop + lpkg.color = black } - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes + // Add edge from predecessor. + if from != nil { + from.unfinishedSuccs.Add(+1) // incref + lpkg.preds = append(lpkg.preds, from) } - stack = stack[:len(stack)-1] // pop - lpkg.color = black return lpkg.needsrc } // For each initial package, create its import DAG. for _, lpkg := range initial { - visit(lpkg) + visit(nil, lpkg) } } else { @@ -845,16 +931,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + + // We avoid using g.SetLimit to limit concurrency as + // it makes g.Go stop accepting work, which prevents + // workers from enqeuing, and thus finishing, and thus + // allowing the group to make progress: deadlock. + // + // Instead we use the ioLimit and cpuLimit semaphores. + g, _ := errgroup.WithContext(ld.Context) + + // enqueues adds a package to the type-checking queue. + // It must have no unfinished successors. + var enqueue func(*loaderPackage) + enqueue = func(lpkg *loaderPackage) { + g.Go(func() error { + // Parse and type-check. + ld.loadPackage(lpkg) + + // Notify each waiting predecessor, + // and enqueue it when it becomes a leaf. + for _, pred := range lpkg.preds { + if pred.unfinishedSuccs.Add(-1) == 0 { // decref + enqueue(pred) + } + } + + return nil + }) + } + + // Load leaves first, adding new packages + // to the queue as they become leaves. + for _, leaf := range leaves { + enqueue(leaf) + } + + if err := g.Wait(); err != nil { + return nil, err // cancelled } - wg.Wait() } // If the context is done, return its error and @@ -896,12 +1011,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } if ld.requestedMode&NeedTypes == 0 { ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil ld.pkgs[i].IllTyped = false } if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } + if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { + ld.pkgs[i].Fset = nil + } if ld.requestedMode&NeedTypesInfo == 0 { ld.pkgs[i].TypesInfo = nil } @@ -916,31 +1033,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { return result, nil } -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. -// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. 
- var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. +// loadPackage loads/parses/typechecks the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. +// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -976,6 +1072,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { if !lpkg.needtypes && !lpkg.needsrc { return } + + // TODO(adonovan): this condition looks wrong: + // I think it should be lpkg.needtypes && !lpg.needsrc, + // so that NeedSyntax without NeedTypes can be satisfied by export data. if !lpkg.needsrc { if err := ld.loadFromExportData(lpkg); err != nil { lpkg.Errors = append(lpkg.Errors, Error{ @@ -1081,7 +1181,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { + if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { return } @@ -1092,16 +1192,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo != 0 { + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } } - versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1154,6 +1258,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // Type-checking is CPU intensive. + cpuLimit <- unit{} // acquire a token + defer func() { <-cpuLimit }() // release a token + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed @@ -1218,8 +1326,11 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } // We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) +// the number of parallel I/O calls or CPU threads per process. +var ( + ioLimit = make(chan unit, 20) + cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) +) func (ld *loader) parseFile(filename string) (*ast.File, error) { ld.parseCacheMu.Lock() @@ -1236,20 +1347,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var src []byte for f, contents := range ld.Config.Overlay { + // TODO(adonovan): Inefficient for large overlays. 
+ // Do an exact name-based map lookup + // (for nonexistent files) followed by a + // FileID-based map lookup (for existing ones). if sameFile(f, filename) { src = contents + break } } var err error if src == nil { - ioLimit <- true // wait + ioLimit <- unit{} // acquire a token src, err = os.ReadFile(filename) - <-ioLimit // signal + <-ioLimit // release a token } if err != nil { v.err = err } else { + // Parsing is CPU intensive. + cpuLimit <- unit{} // acquire a token v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + <-cpuLimit // release a token } close(v.ready) @@ -1264,18 +1383,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - wg.Add(1) - go func(i int, filename string) { + var ( + n = len(filenames) + parsed = make([]*ast.File, n) + errors = make([]error, n) + ) + var g errgroup.Group + for i, filename := range filenames { + // This creates goroutines unnecessarily in the + // cache-hit case, but that case is uncommon. + g.Go(func() error { parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) + return nil + }) } - wg.Wait() + g.Wait() // Eliminate nils, preserving order. var o int @@ -1434,6 +1556,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { // All these things require knowing the import graph. loadMode |= NeedImports } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. + loadMode |= NeedModule + } return loadMode } @@ -1442,4 +1568,4 @@ func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later +type unit struct{} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index a1dcc40b7..df14ffd94 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { // PrintErrors returns the number of errors printed. func PrintErrors(pkgs []*Package) int { var n int + errModules := make(map[*Module]bool) Visit(pkgs, nil, func(pkg *Package) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ } + + // Print pkg.Module.Error once if present. 
+ mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } }) return n } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index a2386c347..16ed3c178 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,8 +63,8 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, +// - The TT operators are encoded as [EKPRUTrCa]; +// two of these ({,Recv}TypeParams) require an integer operand, // which is encoded as a string of decimal digits. // - The TO operators are encoded as [AFMO]; // three of these (At,Field,Method) require an integer operand, @@ -98,19 +98,21 @@ const ( opType = '.' // .Type() (Object) // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) ) // For is equivalent to new(Encoder).For(obj). @@ -226,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. 
return "", fmt.Errorf("no path for %v", obj) @@ -278,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { + if alias, ok := T.(*types.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { return Path(r), nil } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { - // generic named type - return Path(r), nil - } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { + return Path(r), nil + } + + } else if tname.IsAlias() { + // legacy alias + if r := find(obj, T, path); r != nil { + return Path(r), nil } + + } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { return Path(r), nil } } @@ -305,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + if r := find(obj, o.Type(), append(path, opType)); r != nil { return Path(r), nil } } @@ -313,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -325,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if m == obj { return Path(path2), nil // found declared method } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + if r := find(obj, m.Type(), append(path2, opType)); r != nil { return Path(r), nil } } @@ -440,43 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // // The seen map is used to short circuit cycles through type parameters. If // nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { +// +// The seenMethods map is used internally to short circuit cycles through +// interface methods, such as occur in the following example: +// +// type I interface { f() interface{I} } +// +// See golang/go#68046 for details. +func find(obj types.Object, T types.Type, path []byte) []byte { + return (&finder{obj: obj}).find(T, path) +} + +// finder closes over search state for a call to find. +type finder struct { + obj types.Object // the sought object + seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters + seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces +} + +func (f *finder) find(T types.Type, path []byte) []byte { switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) + case *types.Alias: + return f.find(types.Unalias(T), path) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. 
return nil case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + if r := f.find(T.Key(), append(path, opKey)); r != nil { return r } - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Signature: - if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { + if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { + return r + } + if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { return r } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + if r := f.find(T.Params(), append(path, opParams)); r != nil { return r } - return find(obj, T.Results(), append(path, opResults), seen) + return f.find(T.Results(), append(path, opResults)) case *types.Struct: for i := 0; i < T.NumFields(); i++ { fld := T.Field(i) path2 := appendOpArg(path, opField, i) - if fld == obj { + if fld == f.obj { return path2 // found field var } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + if r := f.find(fld.Type(), append(path2, opType)); r != nil { return r } } @@ -485,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] for i := 0; i < T.Len(); i++ { v := T.At(i) path2 := appendOpArg(path, opAt, i) - if v == obj { + if v == f.obj { return path2 // found param/result var } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + if r := f.find(v.Type(), append(path2, opType)); r != nil { return r } } @@ -496,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] case *types.Interface: for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) + if f.seenMethods[m] { + return nil + } path2 := appendOpArg(path, opMethod, i) - if m == obj { + if m == f.obj { return path2 // found interface method } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + if f.seenMethods == nil { + f.seenMethods = make(map[*types.Func]bool) + } + f.seenMethods[m] = true + if r := f.find(m.Type(), append(path2, opType)); r != nil { return r } } return nil case *types.TypeParam: name := T.Obj() - if name == obj { - return append(path, opObj) - } - if seen[name] { + if f.seenTParamNames[name] { return nil } - if seen == nil { - seen = make(map[*types.TypeName]bool) + if name == f.obj { + return append(path, opObj) + } + if f.seenTParamNames == nil { + f.seenTParamNames = make(map[*types.TypeName]bool) } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + f.seenTParamNames[name] = true + if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { return r } return nil @@ -525,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { 
+ return (&finder{obj: obj}).findTypeParam(list, path, op) +} + +func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) - path2 := appendOpArg(path, opTypeParam, i) - if r := find(obj, tparam, path2, seen); r != nil { + path2 := appendOpArg(path, op, i) + if r := f.find(tparam, path2); r != nil { return r } } @@ -580,10 +619,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { code := suffix[0] suffix = suffix[1:] - // Codes [AFM] have an integer operand. + // Codes [AFMTr] have an integer operand. var index int switch code { - case opAt, opField, opMethod, opTypeParam: + case opAt, opField, opMethod, opTypeParam, opRecvTypeParam: rest := strings.TrimLeft(suffix, "0123456789") numerals := suffix[:len(suffix)-len(rest)] suffix = rest @@ -616,7 +655,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = aliases.Unalias(t) + t = types.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -653,6 +692,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = named.Underlying() + case opRhs: + if alias, ok := t.(*types.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + case opTypeParam: hasTypeParams, ok := t.(hasTypeParams) // Named, Signature if !ok { @@ -664,6 +713,17 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = tparams.At(index) + case opRecvTypeParam: + sig, ok := t.(*types.Signature) // Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + rtparams := sig.RecvTypeParams() + if n := rtparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = rtparams.At(index) + case opConstraint: tparam, ok := t.(*types.TypeParam) if !ok { @@ -725,6 +785,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } } + if obj == nil { + panic(p) // path does not end in an object-valued operator + } + if obj.Pkg() != pkg { return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 000000000..754380351 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + fun := ast.Unparen(call.Fun) + + // Look through type instantiation if necessary. + isInstance := false + switch fun.(type) { + case *ast.IndexExpr, *ast.IndexListExpr: + // When extracting the callee from an *IndexExpr, we need to check that + // it is a *types.Func and not a *types.Var. 
+ // Example: Don't match a slice m within the expression `m[0]()`. + isInstance = true + fun, _, _, _ = typeparams.UnpackIndexExpr(fun) + } + + var obj types.Object + switch fun := fun.(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + // A Func is required to match instantiations. + if _, ok := obj.(*types.Func); isInstance && !ok { + return nil // Was not a Func. + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 000000000..b81ce0c33 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 000000000..93b3090c6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,467 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as [Map], +// a hash table that maps [types.Type] to any value. +package typeutil + +import ( + "bytes" + "fmt" + "go/types" + "hash/maphash" + "unsafe" + + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. 
+// +// Read-only map operations ([Map.At], [Map.Len], and so on) may +// safely be called concurrently. +// +// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420 +// and 69559, if the latter proposals for a generic hash-map type and +// a types.Hash function are accepted. +type Map struct { + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher has no effect. +// +// It is a relic of an optimization that is no longer profitable. Do +// not use [Hasher], [MakeHasher], or [SetHasher] in new code. +func (m *Map) SetHasher(Hasher) {} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + hash := hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. 
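Reviewer's note (not part of the vendored file): a minimal usage sketch of the Map API introduced above. The slice types and values are invented for illustration; the point is that lookups go through types.Identical, so two distinct but structurally identical Type values share a single entry.

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is ready to use

	// Two distinct Type values that are structurally identical.
	t1 := types.NewSlice(types.Typ[types.Int])
	t2 := types.NewSlice(types.Typ[types.Int])

	m.Set(t1, "element kind: int")
	fmt.Println(m.At(t2)) // prints "element kind: int": t2 is Identical to t1
	fmt.Println(m.Len())  // prints 1
}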
+func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +// -- Hasher -- + +// hash returns the hash of type t. +// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted. +func hash(t types.Type) uint32 { + return theHasher.Hash(t) +} + +// A Hasher provides a [Hasher.Hash] method to map a type to its hash value. +// Hashers are stateless, and all are equivalent. +type Hasher struct{} + +var theHasher Hasher + +// MakeHasher returns Hasher{}. +// Hashers are stateless; all are equivalent. +func MakeHasher() Hasher { return theHasher } + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + return hasher{inGenericSig: false}.hash(t) +} + +// hasher holds the state of a single Hash traversal: whether we are +// inside the signature of a generic function; this is used to +// optimize [hasher.hashTypeParam]. +type hasher struct{ inGenericSig bool } + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hash computes the hash of t. +func (h hasher) hash(t types.Type) uint32 { + // See Identical for rationale. + switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Alias: + return h.hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + tparams := t.TypeParams() + for i := range tparams.Len() { + h.inGenericSig = true + tparam := tparams.At(i) + hash += 7 * h.hash(tparam.Constraint()) + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). 
+ m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. + if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem()) + + case *types.Named: + hash := h.hashTypeName(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := range n { + hash += 3 * h.hash(tuple.At(i).Type()) + } + return hash +} + +func (h hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. + terms, err := typeparams.UnionTermSet(t) + // if err != nil t has invalid type restrictions. Fall back on a non-zero + // hash. + if err != nil { + return 9151 + } + return h.hashTermSet(terms) +} + +func (h hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.hash(term.Type()) + if term.Tilde() { + termHash *= 9161 + } + hash += 3 * termHash + } + return hash +} + +// hashTypeParam returns the hash of a type parameter. +func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { + // Within the signature of a generic function, TypeParams are + // identical if they have the same index and constraint, so we + // hash them based on index. + // + // When we are outside a generic function, free TypeParams are + // identical iff they are the same object, so we can use a + // more discriminating hash consistent with object identity. + // This optimization saves [Map] about 4% when hashing all the + // types.Info.Types in the forward closure of net/http. + if !h.inGenericSig { + // Optimization: outside a generic function signature, + // use a more discrimating hash consistent with object identity. + return h.hashTypeName(t.Obj()) + } + return 9173 + 3*uint32(t.Index()) +} + +var theSeed = maphash.MakeSeed() + +// hashTypeName hashes the pointer of tname. +func (hasher) hashTypeName(tname *types.TypeName) uint32 { + // Since types.Identical uses == to compare TypeNames, + // the Hash function uses maphash.Comparable. + // TODO(adonovan): or will, when it becomes available in go1.24. + // In the meantime we use the pointer's numeric value. + // + // hash := maphash.Comparable(theSeed, tname) + // + // (Another approach would be to hash the name and package + // path, and whether or not it is a package-level typename. It + // is rare for a package to define multiple local types with + // the same name.) + hash := uintptr(unsafe.Pointer(tname)) + return uint32(hash ^ (hash >> 32)) +} + +// shallowHash computes a hash of t without looking at any of its +// element Types, to avoid potential anonymous cycles in the types of +// interface methods. +// +// When an unnamed non-empty interface type appears anywhere among the +// arguments or results of an interface method, there is a potential +// for endless recursion. 
Consider: +// +// type X interface { m() []*interface { X } } +// +// The problem is that the Methods of the interface in m's result type +// include m itself; there is no mention of the named type X that +// might help us break the cycle. +// (See comment in go/types.identical, case *Interface, for more.) +func (h hasher) shallowHash(t types.Type) uint32 { + // t is the type of an interface method (Signature), + // its params or results (Tuples), or their immediate + // elements (mostly Slice, Pointer, Basic, Named), + // so there's no need to optimize anything else. + switch t := t.(type) { + case *types.Alias: + return h.shallowHash(types.Unalias(t)) + + case *types.Signature: + var hash uint32 = 604171 + if t.Variadic() { + hash *= 971767 + } + // The Signature/Tuple recursion is always finite + // and invariably shallow. + return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := range n { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *types.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashTypeName(t.Obj()) + + case *types.TypeParam: + return h.hashTypeParam(t) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 000000000..f7666028f --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := types.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) 
+ mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 000000000..9dda6a25d --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := types.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index c24c2eee4..b9425f5a2 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,11 +22,17 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. 
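Reviewer's note (not part of the vendored file): a hypothetical caller sketched only to illustrate the precondition stated above. internal/aliases is not importable outside the x/tools module, so this shows how code within that module would use the new signature; the package and alias names are invented.

package example

import (
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/aliases"
)

func newStringsAlias(pkg *types.Package) *types.TypeName {
	// Enabled consults GODEBUG=gotypesalias by running the type checker,
	// so callers should compute it once per task rather than per call.
	enabled := aliases.Enabled()
	rhs := types.NewSlice(types.Typ[types.String])
	// With enabled == false, tparams must be nil or NewAlias panics;
	// generic aliases can only be materialized when aliases are enabled.
	return aliases.NewAlias(enabled, token.NoPos, pkg, "Strings", rhs, nil)
}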
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) + SetTypeParams(types.NewAlias(tname, rhs), tparams) return tname } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index c027b9f31..000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. -type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. -func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index b32995484..7716a3331 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.22 -// +build go1.22 - package aliases import ( @@ -14,31 +11,51 @@ import ( "go/types" ) -// Alias is an alias of types.Alias. -type Alias = types.Alias - // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { +func Rhs(alias *types.Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return Unalias(alias) + return types.Unalias(alias) +} + +// TypeParams returns the type parameter list of the alias. +func TypeParams(alias *types.Alias) *types.TypeParamList { + if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { + return alias.TypeParams() // go1.23+ + } + return nil +} + +// SetTypeParams sets the type parameters of the alias type. +func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { + if alias, ok := any(alias).(interface { + SetTypeParams(tparams []*types.TypeParam) + }); ok { + alias.SetTypeParams(tparams) // go1.23+ + } else if len(tparams) > 0 { + panic("cannot set type parameters of an Alias type in go1.22") + } +} + +// TypeArgs returns the type arguments used to instantiate the Alias type. 
+func TypeArgs(alias *types.Alias) *types.TypeList { + if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { + return alias.TypeArgs() // go1.23+ + } + return nil // empty (go1.22) } -// Unalias is a wrapper of types.Unalias. -func Unalias(t types.Type) types.Type { return types.Unalias(t) } - -// newAlias is an internal alias around types.NewAlias. -// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type) *Alias { - a := types.NewAlias(tname, rhs) - // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. - Unalias(a) - return a +// Origin returns the generic Alias type of which alias is an instance. +// If alias is not an instance of a generic alias, Origin returns alias. +func Origin(alias *types.Alias) *types.Alias { + if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { + return alias.Origin() // go1.23+ + } + return alias // not an instance of a generic alias (go1.22) } // Enabled reports whether [NewAlias] should create [types.Alias] types. @@ -56,7 +73,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index 2c406ded0..58615232f 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -10,281 +10,59 @@ import ( "bytes" "fmt" "go/ast" + "go/scanner" "go/token" "go/types" "os" - "strconv" + pathpkg "path" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/aliases" ) func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { // Get the end position for the type error. - offset, end := fset.PositionFor(start, false).Offset, start - if offset >= len(src) { - return end + file := fset.File(start) + if file == nil { + return start } - if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 { - end = start + token.Pos(width) + if offset := file.PositionFor(start, false).Offset; offset > len(src) { + return start + } else { + src = src[offset:] } - return end -} -func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - // TODO(adonovan): think about generics, and also generic aliases. - under := aliases.Unalias(typ) - // Don't call Underlying unconditionally: although it removes - // Named and Alias, it also removes TypeParam. 
- if n, ok := under.(*types.Named); ok { - under = n.Underlying() - } - switch under := under.(type) { - case *types.Basic: - switch { - case under.Info()&types.IsNumeric != 0: - return &ast.BasicLit{Kind: token.INT, Value: "0"} - case under.Info()&types.IsBoolean != 0: - return &ast.Ident{Name: "false"} - case under.Info()&types.IsString != 0: - return &ast.BasicLit{Kind: token.STRING, Value: `""`} - default: - panic(fmt.Sprintf("unknown basic type %v", under)) - } - case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: - return ast.NewIdent("nil") - case *types.Struct: - texpr := TypeExpr(f, pkg, typ) // typ because we want the name here. - if texpr == nil { - return nil - } - return &ast.CompositeLit{ - Type: texpr, + // Attempt to find a reasonable end position for the type error. + // + // TODO(rfindley): the heuristic implemented here is unclear. It looks like + // it seeks the end of the primary operand starting at start, but that is not + // quite implemented (for example, given a func literal this heuristic will + // return the range of the func keyword). + // + // We should formalize this heuristic, or deprecate it by finally proposing + // to add end position to all type checker errors. + // + // Nevertheless, ensure that the end position at least spans the current + // token at the cursor (this was golang/go#69505). + end := start + { + var s scanner.Scanner + fset := token.NewFileSet() + f := fset.AddFile("", fset.Base(), len(src)) + s.Init(f, src, nil /* no error handler */, scanner.ScanComments) + pos, tok, lit := s.Scan() + if tok != token.SEMICOLON && token.Pos(f.Base()) <= pos && pos <= token.Pos(f.Base()+f.Size()) { + off := file.Offset(pos) + len(lit) + src = src[off:] + end += token.Pos(off) } } - return nil -} -// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of -// analysisinternal.ZeroValue) -func IsZeroValue(expr ast.Expr) bool { - switch e := expr.(type) { - case *ast.BasicLit: - return e.Value == "0" || e.Value == `""` - case *ast.Ident: - return e.Name == "nil" || e.Name == "false" - default: - return false + // Look for bytes that might terminate the current operand. See note above: + // this is imprecise. + if width := bytes.IndexAny(src, " \n,():;[]+-*/"); width > 0 { + end += token.Pos(width) } -} - -// TypeExpr returns syntax for the specified type. References to -// named types from packages other than pkg are qualified by an appropriate -// package name, as defined by the import environment of file. 
-func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - switch t := typ.(type) { - case *types.Basic: - switch t.Kind() { - case types.UnsafePointer: - return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} - default: - return ast.NewIdent(t.Name()) - } - case *types.Pointer: - x := TypeExpr(f, pkg, t.Elem()) - if x == nil { - return nil - } - return &ast.UnaryExpr{ - Op: token.MUL, - X: x, - } - case *types.Array: - elt := TypeExpr(f, pkg, t.Elem()) - if elt == nil { - return nil - } - return &ast.ArrayType{ - Len: &ast.BasicLit{ - Kind: token.INT, - Value: fmt.Sprintf("%d", t.Len()), - }, - Elt: elt, - } - case *types.Slice: - elt := TypeExpr(f, pkg, t.Elem()) - if elt == nil { - return nil - } - return &ast.ArrayType{ - Elt: elt, - } - case *types.Map: - key := TypeExpr(f, pkg, t.Key()) - value := TypeExpr(f, pkg, t.Elem()) - if key == nil || value == nil { - return nil - } - return &ast.MapType{ - Key: key, - Value: value, - } - case *types.Chan: - dir := ast.ChanDir(t.Dir()) - if t.Dir() == types.SendRecv { - dir = ast.SEND | ast.RECV - } - value := TypeExpr(f, pkg, t.Elem()) - if value == nil { - return nil - } - return &ast.ChanType{ - Dir: dir, - Value: value, - } - case *types.Signature: - var params []*ast.Field - for i := 0; i < t.Params().Len(); i++ { - p := TypeExpr(f, pkg, t.Params().At(i).Type()) - if p == nil { - return nil - } - params = append(params, &ast.Field{ - Type: p, - Names: []*ast.Ident{ - { - Name: t.Params().At(i).Name(), - }, - }, - }) - } - if t.Variadic() { - last := params[len(params)-1] - last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} - } - var returns []*ast.Field - for i := 0; i < t.Results().Len(); i++ { - r := TypeExpr(f, pkg, t.Results().At(i).Type()) - if r == nil { - return nil - } - returns = append(returns, &ast.Field{ - Type: r, - }) - } - return &ast.FuncType{ - Params: &ast.FieldList{ - List: params, - }, - Results: &ast.FieldList{ - List: returns, - }, - } - case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam} - if t.Obj().Pkg() == nil { - return ast.NewIdent(t.Obj().Name()) - } - if t.Obj().Pkg() == pkg { - return ast.NewIdent(t.Obj().Name()) - } - pkgName := t.Obj().Pkg().Name() - - // If the file already imports the package under another name, use that. - for _, cand := range f.Imports { - if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() { - if cand.Name != nil && cand.Name.Name != "" { - pkgName = cand.Name.Name - } - } - } - if pkgName == "." { - return ast.NewIdent(t.Obj().Name()) - } - return &ast.SelectorExpr{ - X: ast.NewIdent(pkgName), - Sel: ast.NewIdent(t.Obj().Name()), - } - case *types.Struct: - return ast.NewIdent(t.String()) - case *types.Interface: - return ast.NewIdent(t.String()) - default: - return nil - } -} - -// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. -// Some examples: -// -// Basic Example: -// z := 1 -// y := z + x -// If x is undeclared, then this function would return `y := z + x`, so that we -// can insert `x := ` on the line before `y := z + x`. -// -// If stmt example: -// if z == 1 { -// } else if z == y {} -// If y is undeclared, then this function would return `if z == 1 {`, because we cannot -// insert a statement between an if and an else if statement. As a result, we need to find -// the top of the if chain to insert `y := ` before. 
-func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { - enclosingIndex := -1 - for i, p := range path { - if _, ok := p.(ast.Stmt); ok { - enclosingIndex = i - break - } - } - if enclosingIndex == -1 { - return nil - } - enclosingStmt := path[enclosingIndex] - switch enclosingStmt.(type) { - case *ast.IfStmt: - // The enclosingStmt is inside of the if declaration, - // We need to check if we are in an else-if stmt and - // get the base if statement. - return baseIfStmt(path, enclosingIndex) - case *ast.CaseClause: - // Get the enclosing switch stmt if the enclosingStmt is - // inside of the case statement. - for i := enclosingIndex + 1; i < len(path); i++ { - if node, ok := path[i].(*ast.SwitchStmt); ok { - return node - } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { - return node - } - } - } - if len(path) <= enclosingIndex+1 { - return enclosingStmt.(ast.Stmt) - } - // Check if the enclosing statement is inside another node. - switch expr := path[enclosingIndex+1].(type) { - case *ast.IfStmt: - // Get the base if statement. - return baseIfStmt(path, enclosingIndex+1) - case *ast.ForStmt: - if expr.Init == enclosingStmt || expr.Post == enclosingStmt { - return expr - } - } - return enclosingStmt.(ast.Stmt) -} - -// baseIfStmt walks up the if/else-if chain until we get to -// the top of the current if chain. -func baseIfStmt(path []ast.Node, index int) ast.Stmt { - stmt := path[index] - for i := index + 1; i < len(path); i++ { - if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { - stmt = node - continue - } - break - } - return stmt.(ast.Stmt) + return end } // WalkASTWithParent walks the AST rooted at n. The semantics are @@ -399,22 +177,21 @@ func equivalentTypes(want, got types.Type) bool { // MakeReadFile returns a simple implementation of the Pass.ReadFile function. func MakeReadFile(pass *analysis.Pass) func(filename string) ([]byte, error) { return func(filename string) ([]byte, error) { - if err := checkReadable(pass, filename); err != nil { + if err := CheckReadable(pass, filename); err != nil { return nil, err } return os.ReadFile(filename) } } -// checkReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. -func checkReadable(pass *analysis.Pass, filename string) error { +// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. +func CheckReadable(pass *analysis.Pass, filename string) error { if slicesContains(pass.OtherFiles, filename) || slicesContains(pass.IgnoredFiles, filename) { return nil } for _, f := range pass.Files { - // TODO(adonovan): use go1.20 f.FileStart - if pass.Fset.File(f.Pos()).Name() == filename { + if pass.Fset.File(f.FileStart).Name() == filename { return nil } } @@ -430,3 +207,81 @@ func slicesContains[S ~[]E, E comparable](slice S, x E) bool { } return false } + +// AddImport checks whether this file already imports pkgpath and +// that import is in scope at pos. If so, it returns the name under +// which it was imported and a zero edit. Otherwise, it adds a new +// import of pkgpath, using a name derived from the preferred name, +// and returns the chosen name along with the edit for the new import. +// +// It does not mutate its arguments. +func AddImport(info *types.Info, file *ast.File, pos token.Pos, pkgpath, preferredName string) (name string, newImport []analysis.TextEdit) { + // Find innermost enclosing lexical block. 
+ scope := info.Scopes[file].Innermost(pos) + if scope == nil { + panic("no enclosing lexical block") + } + + // Is there an existing import of this package? + // If so, are we in its scope? (not shadowed) + for _, spec := range file.Imports { + pkgname, ok := importedPkgName(info, spec) + if ok && pkgname.Imported().Path() == pkgpath { + if _, obj := scope.LookupParent(pkgname.Name(), pos); obj == pkgname { + return pkgname.Name(), nil + } + } + } + + // We must add a new import. + // Ensure we have a fresh name. + newName := preferredName + for i := 0; ; i++ { + if _, obj := scope.LookupParent(newName, pos); obj == nil { + break // fresh + } + newName = fmt.Sprintf("%s%d", preferredName, i) + } + + // For now, keep it real simple: create a new import + // declaration before the first existing declaration (which + // must exist), including its comments, and let goimports tidy it up. + // + // Use a renaming import whenever the preferred name is not + // available, or the chosen name does not match the last + // segment of its path. + newText := fmt.Sprintf("import %q\n\n", pkgpath) + if newName != preferredName || newName != pathpkg.Base(pkgpath) { + newText = fmt.Sprintf("import %s %q\n\n", newName, pkgpath) + } + decl0 := file.Decls[0] + var before ast.Node = decl0 + switch decl0 := decl0.(type) { + case *ast.GenDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + case *ast.FuncDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + } + return newName, []analysis.TextEdit{{ + Pos: before.Pos(), + End: before.Pos(), + NewText: []byte(newText), + }} +} + +// importedPkgName returns the PkgName object declared by an ImportSpec. +// TODO(adonovan): use go1.22's Info.PkgNameOf. +func importedPkgName(info *types.Info, imp *ast.ImportSpec) (*types.PkgName, bool) { + var obj types.Object + if imp.Name != nil { + obj = info.Defs[imp.Name] + } else { + obj = info.Implicits[imp] + } + pkgname, ok := obj.(*types.PkgName) + return pkgname, ok +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/vendor/golang.org/x/tools/internal/diff/lcs/old.go index 4353da15b..7c74b47bb 100644 --- a/vendor/golang.org/x/tools/internal/diff/lcs/old.go +++ b/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -199,6 +199,7 @@ func (e *editGraph) bdone(D, k int) (bool, lcs) { } // run the backward algorithm, until success or up to the limit on D. +// (used only by tests) func backward(e *editGraph) lcs { e.setBackward(0, 0, e.ux) if ok, ans := e.bdone(0, 0); ok { diff --git a/vendor/golang.org/x/tools/internal/facts/imports.go b/vendor/golang.org/x/tools/internal/facts/imports.go index 9f706cd95..ed5ec5fa1 100644 --- a/vendor/golang.org/x/tools/internal/facts/imports.go +++ b/vendor/golang.org/x/tools/internal/facts/imports.go @@ -8,6 +8,7 @@ import ( "go/types" "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) // importMap computes the import map for a package by traversing the @@ -47,32 +48,41 @@ func importMap(imports []*types.Package) map[string]*types.Package { addType = func(T types.Type) { switch T := T.(type) { - case *aliases.Alias: - addType(aliases.Unalias(T)) case *types.Basic: // nop - case *types.Named: + case typesinternal.NamedOrAlias: // *types.{Named,Alias} + // Add the type arguments if this is an instance. + if targs := typesinternal.TypeArgs(T); targs.Len() > 0 { + for i := 0; i < targs.Len(); i++ { + addType(targs.At(i)) + } + } + // Remove infinite expansions of *types.Named by always looking at the origin. 
// Some named types with type parameters [that will not type check] have // infinite expansions: // type N[T any] struct { F *N[N[T]] } // importMap() is called on such types when Analyzer.RunDespiteErrors is true. - T = T.Origin() + T = typesinternal.Origin(T) if !typs[T] { typs[T] = true + + // common aspects addObj(T.Obj()) - addType(T.Underlying()) - for i := 0; i < T.NumMethods(); i++ { - addObj(T.Method(i)) - } - if tparams := T.TypeParams(); tparams != nil { + if tparams := typesinternal.TypeParams(T); tparams.Len() > 0 { for i := 0; i < tparams.Len(); i++ { addType(tparams.At(i)) } } - if targs := T.TypeArgs(); targs != nil { - for i := 0; i < targs.Len(); i++ { - addType(targs.At(i)) + + // variant aspects + switch T := T.(type) { + case *types.Alias: + addType(aliases.Rhs(T)) + case *types.Named: + addType(T.Underlying()) + for i := 0; i < T.NumMethods(); i++ { + addObj(T.Method(i)) } } } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d98b0db2a..d79a605ed 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir { return 0 } } - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index f6437feb1..5662a311d 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,49 +2,183 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. +// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. 
+// This file also additionally implements FindExportData for gcexportdata.NewReader. package gcimporter import ( "bufio" + "bytes" + "errors" "fmt" + "go/build" "io" - "strconv" + "os" + "os/exec" + "path/filepath" "strings" + "sync" ) -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. - hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying cmd/compile created archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. +// This returns the length of the export data in bytes. +// +// This function is needed by [gcexportdata.Read], which must +// accept inputs produced by the last two releases of cmd/compile, +// plus tip. +func FindExportData(r *bufio.Reader) (size int64, err error) { + arsize, err := FindPackageDefinition(r) + if err != nil { + return + } + size = int64(arsize) + + objapi, headers, err := ReadObjectHeaders(r) if err != nil { return } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) + size -= int64(len(objapi)) + for _, h := range headers { + size -= int64(len(h)) + } + + // Check for the binary export data section header "$$B\n". + // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading + line, err := r.ReadSlice('\n') + if err != nil { + return + } + hdr := string(line) + if hdr != "$$B\n" { + err = fmt.Errorf("unknown export data header: %q", hdr) + return } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - length, err := strconv.Atoi(s) - size = int64(length) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") + size -= int64(len(hdr)) + + // For files with a binary export data header "$$B\n", + // these are always terminated by an end-of-section marker "\n$$\n". + // So the last bytes must always be this constant. + // + // The end-of-section marker is not a part of the export data itself. + // Do not include these in size. + // + // It would be nice to have sanity check that the final bytes after + // the export data are indeed the end-of-section marker. The split + // of gcexportdata.NewReader and gcexportdata.Read make checking this + // ugly so gcimporter gives up enforcing this. The compiler and go/types + // importer do enforce this, which seems good enough. + const endofsection = "\n$$\n" + size -= int64(len(endofsection)) + + if size < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) return } - name = strings.TrimSpace(string(hdr[:16])) + return } -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. The hdr result -// is the string before the export data, either "$$" or "$$B". -// The size result is the length of the export data in bytes, or -1 if not known. -func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { +// ReadUnified reads the contents of the unified export data from a reader r +// that contains the contents of a GC-created archive file. +// +// On success, the reader will be positioned after the end-of-section marker "\n$$\n". 
+// +// Supported GC-created archive files have 4 layers of nesting: +// - An archive file containing a package definition file. +// - The package definition file contains headers followed by a data section. +// Headers are lines (≀ 4kb) that do not start with "$$". +// - The data section starts with "$$B\n" followed by export data followed +// by an end of section marker "\n$$\n". (The section start "$$\n" is no +// longer supported.) +// - The export data starts with a format byte ('u') followed by the in +// the given format. (See ReadExportDataHeader for older formats.) +// +// Putting this together, the bytes in a GC-created archive files are expected +// to look like the following. +// See cmd/internal/archive for more details on ar file headers. +// +// | \n | ar file signature +// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size. +// | go object <...>\n | objabi header +// | \n | other headers such as build id +// | $$B\n | binary format marker +// | u\n | unified export +// | $$\n | end-of-section marker +// | [optional padding] | padding byte (0x0A) if size is odd +// | [ar file header] | other ar files +// | [ar file data] | +func ReadUnified(r *bufio.Reader) (data []byte, err error) { + // We historically guaranteed headers at the default buffer size (4096) work. + // This ensures we can use ReadSlice throughout. + const minBufferSize = 4096 + r = bufio.NewReaderSize(r, minBufferSize) + + size, err := FindPackageDefinition(r) + if err != nil { + return + } + n := size + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + n -= len(objapi) + for _, h := range headers { + n -= len(h) + } + + hdrlen, err := ReadExportDataHeader(r) + if err != nil { + return + } + n -= hdrlen + + // size also includes the end of section marker. Remove that many bytes from the end. + const marker = "\n$$\n" + n -= len(marker) + + if n < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n) + return + } + + // Read n bytes from buf. + data = make([]byte, n) + _, err = io.ReadFull(r, data) + if err != nil { + return + } + + // Check for marker at the end. + var suffix [len(marker)]byte + _, err = io.ReadFull(r, suffix[:]) + if err != nil { + return + } + if s := string(suffix[:]); s != marker { + err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker) + return + } + + return +} + +// FindPackageDefinition positions the reader r at the beginning of a package +// definition file ("__.PKGDEF") within a GC-created archive by reading +// from it, and returns the size of the package definition file in the archive. +// +// The reader must be positioned at the start of the archive file before calling +// this function, and "__.PKGDEF" is assumed to be the first file in the archive. +// +// See cmd/internal/archive for details on the archive format. +func FindPackageDefinition(r *bufio.Reader) (size int, err error) { + // Uses ReadSlice to limit risk of malformed inputs. + // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -52,48 +186,236 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { return } - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } + // Is the first line an archive file signature? 
+ if string(line) != "!\n" { + err = fmt.Errorf("not the start of an archive file (%q)", line) + return + } + + // package export block should be first + size = readArchiveHeader(r, "__.PKGDEF") + if size <= 0 { + err = fmt.Errorf("not a package file") + return + } + + return +} - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") +// ReadObjectHeaders reads object headers from the reader. Object headers are +// lines that do not start with an end-of-section marker "$$". The first header +// is the objabi header. On success, the reader will be positioned at the beginning +// of the end-of-section marker. +// +// It returns an error if any header does not fit in r.Size() bytes. +func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) { + // line is a temporary buffer for headers. + // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs. + var line []byte + + // objapi header should be the first line + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + objapi = string(line) + + // objapi header begins with "go object ". + if !strings.HasPrefix(objapi, "go object ") { + err = fmt.Errorf("not a go object file: %s", objapi) + return + } + + // process remaining object header lines + for { + // check for an end of section marker "$$" + line, err = r.Peek(2) + if err != nil { return } + if string(line) == "$$" { + return // stop + } - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) + // read next header + line, err = r.ReadSlice('\n') + if err != nil { return } - size -= int64(len(line)) + headers = append(headers, string(line)) } +} - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") +// ReadExportDataHeader reads the export data header and format from r. +// It returns the number of bytes read, or an error if the format is no longer +// supported or it failed to read. +// +// The only currently supported format is binary export data in the +// unified export format. +func ReadExportDataHeader(r *bufio.Reader) (n int, err error) { + // Read export data header. + line, err := r.ReadSlice('\n') + if err != nil { return } - // Skip over object header to export data. - // Begins after first line starting with $$. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) + hdr := string(line) + switch hdr { + case "$$\n": + err = fmt.Errorf("old textual export format no longer supported (recompile package)") + return + + case "$$B\n": + var format byte + format, err = r.ReadByte() + if err != nil { return } - size -= int64(len(line)) - } - hdr = string(line) - if size < 0 { - size = -1 + // The unified export format starts with a 'u'. + switch format { + case 'u': + default: + // Older no longer supported export formats include: + // indexed export format which started with an 'i'; and + // the older binary export format which started with a 'c', + // 'd', or 'v' (from "version"). 
+ err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format) + return + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + return } + n = len(hdr) + 1 // + 1 is for 'u' return } + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// +// FindPkg is only used in tests within x/tools. +func FindPkg(path, srcDir string) (filename, id string, err error) { + // TODO(taking): Move internal/exportdata.FindPkg into its own file, + // and then this copy into a _test package. + if path == "" { + return "", "", errors.New("path is empty") + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. + if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + var bp *build.Package + bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + if bp.Goroot && bp.Dir != "" { + filename, err = lookupGorootExport(bp.Dir) + if err == nil { + _, err = os.Stat(filename) + } + if err == nil { + return filename, bp.ImportPath, nil + } + } + goto notfound + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + } + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + f, statErr := os.Stat(filename) + if statErr == nil && !f.IsDir() { + return filename, id, nil + } + if err == nil { + err = statErr + } + } + +notfound: + if err == nil { + return "", path, fmt.Errorf("can't find import: %q", path) + } + return "", path, fmt.Errorf("can't find import: %q: %w", path, err) +} + +var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension + +var exportMap sync.Map // package dir β†’ func() (string, error) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +// +// lookupGorootExport is only used in tests within x/tools. 
+func lookupGorootExport(pkgDir string) (string, error) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + err error + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { + listOnce.Do(func() { + cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) + var output []byte + output, err = cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + err = errors.New(string(ee.Stderr)) + } + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) + return + } + + exportPath = exports[0] + }) + + return exportPath, err + }) + } + + return f.(func() (string, error))() +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 39df91124..3dbd21d1b 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" - "bytes" "fmt" - "go/build" "go/token" "go/types" "io" "os" - "os/exec" - "path/filepath" - "strings" - "sync" ) const ( @@ -45,125 +39,14 @@ const ( trace = false ) -var exportMap sync.Map // package dir β†’ func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { +// +// Import is only used in tests. +func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser - var filename, id string + var id string if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -182,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } rc = f } else { - filename, id = FindPkg(path, srcDir) + var filename string + filename, id, err = FindPkg(path, srcDir) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } - return nil, fmt.Errorf("can't find import: %q", id) + return nil, err } // no need to re-import if the package was imported completely before @@ -210,57 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var hdr string - var size int64 buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { + data, err := ReadUnified(buf) + if err != nil { + err = fmt.Errorf("import %q: %v", path, err) return } - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. 
- if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': // indexed, till go1.19 - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': // unified, from go1.20 - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } + // unified: emitted by cmd/compile since go1.20. + _, pkg, err = UImportData(fset, packages, data, id) return } - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index deeb67f31..7dfc31a37 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -2,9 +2,227 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. 
+// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' or 'B' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'B' +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. +// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple delta +// encoding scheme within a data object. See exportWriter.pos for +// details. 
package gcimporter @@ -24,11 +242,30 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/tokeninternal" ) // IExportShallow encodes "shallow" export data for the specified package. // +// For types, we use "shallow" export data. Historically, the Go +// compiler always produced a summary of the types for a given package +// that included types from other packages that it indirectly +// referenced: "deep" export data. This had the advantage that the +// compiler (and analogous tools such as gopls) need only load one +// file per direct import. However, it meant that the files tended to +// get larger based on the level of the package in the import +// graph. For example, higher-level packages in the kubernetes module +// have over 1MB of "deep" export data, even when they have almost no +// content of their own, merely because they mention a major type that +// references many others. In pathological cases the export data was +// 300x larger than the source for a package due to this quadratic +// growth. +// +// "Shallow" export data means that the serialized types describe only +// a single package. If those types mention types from other packages, +// the type checker may need to request additional packages beyond +// just the direct imports. Type information for the entire transitive +// closure of imports is provided (lazily) by the DAG. +// // No promises are made about the encoding other than that it can be decoded by // the same version of IIExportShallow. If you plan to save export data in the // file system, be sure to include a cryptographic digest of the executable in @@ -51,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) } // IImportShallow decodes "shallow" types.Package data encoded by -// IExportShallow in the same executable. This function cannot import data from -// cmd/compile or gcexportdata.Write. +// [IExportShallow] in the same executable. This function cannot import data +// from cmd/compile or gcexportdata.Write. // // The importer calls getPackages to obtain package symbols for all // packages mentioned in the export data, including the one being @@ -223,7 +460,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) // Sort the set of needed offsets. Duplicates are harmless. 
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - lines := tokeninternal.GetLines(file) // byte offset of each line start + lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -507,13 +744,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -523,9 +760,22 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag(aliasTag) + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -744,8 +994,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. + case *types.Alias: + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } w.startType(aliasType) w.qualifiedType(t.Obj()) @@ -854,7 +1110,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 136aa0365..69b1d697c 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,9 +3,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. +// See iexport.go for the export data format. package gcimporter @@ -53,6 +51,7 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -540,7 +539,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := aliases.Unalias(rhs).(*types.Interface) + iface, _ := types.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -557,19 +556,28 @@ type importReader struct { prevColumn int64 } +// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. 
+// +// If TypeNames are not marked black (in the sense of go/types cycle +// detection), they may be mutated when dot-imported. Fix this by punching a +// hole through the type, when compiling with Go 1.23. (The bug has been fixed +// for 1.24, but the fix was not worth back-porting). +var markBlack = func(name *types.TypeName) {} + func (r *importReader) obj(name string) { tag := r.byte() pos := r.pos() switch tag { - case aliasTag: + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + markBlack(obj) // workaround for golang/go#69912 + r.declare(obj) case constTag: typ, val := r.value() @@ -589,6 +597,9 @@ func (r *importReader) obj(name string) { // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) named := types.NewNamed(obj, nil, nil) + + markBlack(obj) // workaround for golang/go#69912 + // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). r.declare(obj) @@ -615,7 +626,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -645,7 +656,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) + iface, _ := types.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -852,7 +863,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) + _, ok := types.Unalias(t).(*types.Interface) return ok } @@ -862,7 +873,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %s)", k, base) + r.p.trace("importing type %d (base: %v)", k, base) r.p.indent++ defer func() { r.p.indent-- @@ -959,7 +970,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := newInterface(methods, embeddeds) + typ := types.NewInterfaceType(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1051,7 +1062,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. 
- xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) } return xs } @@ -1098,3 +1109,9 @@ func (r *importReader) byte() byte { } return x } + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go new file mode 100644 index 000000000..7586bfaca --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 && !go1.24 + +package gcimporter + +import ( + "go/token" + "go/types" + "unsafe" +) + +// TODO(rfindley): delete this workaround once go1.24 is assured. + +func init() { + // Update markBlack so that it correctly sets the color + // of imported TypeNames. + // + // See the doc comment for markBlack for details. + + type color uint32 + const ( + white color = iota + black + grey + ) + type object struct { + _ *types.Scope + _ token.Pos + _ *types.Package + _ string + _ types.Type + _ uint32 + color_ color + _ token.Pos + } + type typeName struct { + object + } + + // If the size of types.TypeName changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) + var _ [-delta * delta]int + + markBlack = func(obj *types.TypeName) { + type uP = unsafe.Pointer + var ptr *typeName + *(*uP)(uP(&ptr)) = uP(obj) + ptr.color_ = black + } +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d0..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40f..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 000000000..907c8557a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. +var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go new file mode 100644 index 000000000..4af810dc4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "bufio" + "io" + "strconv" + "strings" +) + +// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader. 
+func readArchiveHeader(b *bufio.Reader, name string) int { + // architecture-independent object file output + const HeaderSize = 60 + + var buf [HeaderSize]byte + if _, err := io.ReadFull(b, buf[:]); err != nil { + return -1 + } + aname := strings.Trim(string(buf[0:16]), " ") + if !strings.HasPrefix(aname, name) { + return -1 + } + asize := strings.Trim(string(buf[48:58]), " ") + i, _ := strconv.Atoi(asize) + return i +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b6..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624cad..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b3..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 2c0770688..6cdab448e 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,7 +11,6 @@ import ( "go/token" "go/types" "sort" - "strings" "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" @@ -52,8 +51,7 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index - needed bool + idx pkgbits.Index } // See cmd/compile/internal/noder.typeInfo.
@@ -72,7 +70,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -110,13 +107,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -165,7 +166,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. tparams []*types.TypeParam - // devived is a slice of types derived from tparams, which may be + // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -263,7 +264,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { - case "": + // cmd/compile emits path="main" for main packages because + // that's the linker symbol prefix it used; but we need + // the package's path as it would be reported by go list, + // hence "main" below. + // See test at go/packages.TestMainPackagePathInModeTypes. + case "", "main": path = r.p.PkgPath() case "builtin": return nil // universe @@ -471,7 +477,9 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -525,8 +533,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() @@ -553,7 +565,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
- if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) @@ -632,7 +644,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { dict.derived = make([]derivedInfo, r.Len()) dict.derivedTypes = make([]types.Type, len(dict.derived)) for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} + if r.Version().Has(pkgbits.DerivedInfoNeeded) { + assert(!r.Bool()) + } } pr.retireReader(r) @@ -726,3 +741,17 @@ func pkgScope(pkg *types.Package) *types.Scope { } return types.Universe } + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index eb7a8282f..e333efc87 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,13 +8,14 @@ package gocommand import ( "bytes" "context" + "encoding/json" "errors" "fmt" "io" "log" "os" "os/exec" - "reflect" + "path/filepath" "regexp" "runtime" "strconv" @@ -167,7 +168,9 @@ type Invocation struct { // TODO(rfindley): remove, in favor of Args. ModFile string - // If Overlay is set, the go command is invoked with -overlay=Overlay. + // Overlay is the name of the JSON overlay file that describes + // unsaved editor buffers; see [WriteOverlays]. + // If set, the go command is invoked with -overlay=Overlay. // TODO(rfindley): remove, in favor of Args. Overlay string @@ -196,12 +199,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io return } -func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { - log := i.Logf - if log == nil { - log = func(string, ...interface{}) {} +// logf logs if i.Logf is non-nil. +func (i *Invocation) logf(format string, args ...any) { + if i.Logf != nil { + i.Logf(format, args...) } +} +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { goArgs := []string{i.Verb} appendModFile := func() { @@ -244,23 +249,23 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Stdout = stdout cmd.Stderr = stderr - // cmd.WaitDelay was added only in go1.20 (see #50436). - if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { - // https://go.dev/issue/59541: don't wait forever copying stderr - // after the command has exited. - // After CL 484741 we copy stdout manually, so we we'll stop reading that as - // soon as ctx is done. However, we also don't want to wait around forever - // for stderr. Give a much-longer-than-reasonable delay and then assume that - // something has wedged in the kernel or runtime.
- waitDelay.Set(reflect.ValueOf(30 * time.Second)) - } - - // On darwin the cwd gets resolved to the real path, which breaks anything that - // expects the working directory to keep the original path, including the + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second + + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the // go command when dealing with modules. - // The Go stdlib has a special feature where if the cwd and the PWD are the - // same node then it trusts the PWD, so by setting it in the env for the child - // process we fix up all the paths returned by the go command. + // + // os.Getwd has a special feature where if the cwd and the PWD + // are the same node then it trusts the PWD, so by setting it + // in the env for the child process we fix up all the paths + // returned by the go command. if !i.CleanEnv { cmd.Env = os.Environ() } @@ -270,7 +275,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Dir = i.WorkingDir } - defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + debugStr := cmdDebugStr(cmd) + i.logf("starting %v", debugStr) + start := time.Now() + defer func() { + i.logf("%s for %v", time.Since(start), debugStr) + }() return runCmdContext(ctx, cmd) } @@ -351,6 +361,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { } } + startTime := time.Now() err = cmd.Start() if stdoutW != nil { // The child process has inherited the pipe file, @@ -377,7 +388,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { case err := <-resChan: return err case <-timer.C: - HandleHangingGoCommand(cmd.Process) + HandleHangingGoCommand(startTime, cmd) case <-ctx.Done(): } } else { @@ -411,7 +422,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { return <-resChan } -func HandleHangingGoCommand(proc *os.Process) { +func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) { switch runtime.GOOS { case "linux", "darwin", "freebsd", "netbsd": fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND @@ -444,7 +455,7 @@ See golang/go#54461 for more details.`) panic(fmt.Sprintf("running %s: %v", listFiles, err)) } } - panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) + panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)) } func cmdDebugStr(cmd *exec.Cmd) string { @@ -468,3 +479,73 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } + +// WriteOverlays writes each value in the overlay (see the Overlay +// field of go/packages.Config) to a temporary file and returns the name +// of a JSON file describing the mapping that is suitable for the "go +// list -overlay" flag. 
+// +// On success, the caller must call the cleanup function exactly once +// when the files are no longer needed. +func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(overlay) == 0 { + return "", func() {}, nil + } + + dir, err := os.MkdirTemp("", "gocommand-*") + if err != nil { + return "", nil, err + } + + // The caller must clean up this directory, + // unless this function returns an error. + // (The cleanup operand of each return + // statement below is ignored.) + defer func() { + cleanup = func() { + os.RemoveAll(dir) + } + if err != nil { + cleanup() + cleanup = nil + } + }() + + // Write each map entry to a temporary file. + overlays := make(map[string]string) + for k, v := range overlay { + // Use a unique basename for each file (001-foo.go), + // to avoid creating nested directories. + base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k)) + filename := filepath.Join(dir, base) + err := os.WriteFile(filename, v, 0666) + if err != nil { + return "", nil, err + } + overlays[k] = filename + } + + // Write the JSON overlay file that maps logical file names to temp files. + // + // OverlayJSON is the format overlay files are expected to be in. + // The Replace map maps from overlaid paths to replacement paths: + // the Go command will forward all reads trying to open + // each overlaid path to its replacement path, or consider the overlaid + // path not to exist if the replacement path is empty. + // + // From golang/go#39958. + type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", nil, err + } + filename = filepath.Join(dir, "overlay.json") + if err := os.WriteFile(filename, b, 0666); err != nil { + return "", nil, err + } + + return filename, nil, nil +} diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 44719de17..66e69b438 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,7 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal -var GetForTest = func(p interface{}) string { return "" } var GetDepsErrors = func(p interface{}) []*PackageError { return nil } type PackageError struct { @@ -16,7 +15,6 @@ type PackageError struct { var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors -var ForTest int // must be set as a LoadMode to call GetForTest var SetModFlag = func(config interface{}, value string) {} var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index 2acd85851..f6cb37c5c 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -21,10 +21,7 @@ import ( // export data. type PkgDecoder struct { // version is the file format version. - version uint32 - - // aliases determines whether types.Aliases should be created - aliases bool + version Version // sync indicates whether the file uses sync markers. 
sync bool @@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } // NewPkgDecoder returns a PkgDecoder initialized to read the Unified // IR export data from input. pkgPath is the package path for the // compilation unit that produced the export data. -// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, - //aliases: aliases.Enabled(), } // TODO(mdempsky): Implement direct indexing of input string to @@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { r := strings.NewReader(input) - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + var ver uint32 + assert(binary.Read(r, binary.LittleEndian, &ver) == nil) + pr.version = Version(ver) - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: + if pr.version >= numVersions { + panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1)) + } + + if pr.version.Has(Flags) { var flags uint32 assert(binary.Read(r, binary.LittleEndian, &flags) == nil) pr.sync = flags&flagSyncMarkers != 0 @@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { assert(err == nil) pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + const fingerprintSize = 8 + assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1])) return pr } @@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { absIdx += int(pr.elemEndsEnds[k-1]) } if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) } return absIdx } @@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { Idx: idx, } - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. - r.Data = *strings.NewReader(pr.DataIdx(k, idx)) - + r.Data.Reset(pr.DataIdx(k, idx)) r.Sync(SyncRelocs) r.Relocs = make([]RelocEnt, r.Len()) for i := range r.Relocs { @@ -248,7 +243,7 @@ type Decoder struct { func (r *Decoder) checkErr(err error) { if err != nil { - errorf("unexpected decoding error: %w", err) + panicf("unexpected decoding error: %w", err) } } @@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { return path, name, tag } + +// Version reports the version of the bitstream. +func (w *Decoder) Version() Version { return w.common.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go index 6482617a4..c17a12399 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -12,18 +12,15 @@ import ( "io" "math/big" "runtime" + "strings" ) -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - // A PkgEncoder provides methods for encoding a package's Unified IR // export data. type PkgEncoder struct { + // version of the bitstream. + version Version + // elems holds the bitstream for previously encoded elements. 
elems [numRelocs][]string @@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } // export data files, but can help diagnosing desync errors in // higher-level Unified IR reader/writer code. If syncFrames is // negative, then sync markers are omitted entirely. -func NewPkgEncoder(syncFrames int) PkgEncoder { +func NewPkgEncoder(version Version, syncFrames int) PkgEncoder { return PkgEncoder{ + version: version, stringsIdx: make(map[string]Index), syncFrames: syncFrames, } @@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { assert(binary.Write(out, binary.LittleEndian, x) == nil) } - writeUint32(currentVersion) + writeUint32(uint32(pw.version)) - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers + if pw.version.Has(Flags) { + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) } - writeUint32(flags) // Write elemEndsEnds. var sum uint32 @@ -159,7 +159,7 @@ type Encoder struct { // Flush finalizes the element's bitstream and returns its Index. func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + var sb strings.Builder // Backup the data so we write the relocations at the front. var tmp bytes.Buffer @@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index { func (w *Encoder) checkErr(err error) { if err != nil { - errorf("unexpected encoding error: %v", err) + panicf("unexpected encoding error: %v", err) } } @@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) { // section (if not already present), and then writing a relocation // into the element bitstream. func (w *Encoder) String(s string) { + w.StringRef(w.p.StringIdx(s)) +} + +// StringRef writes a reference to the given index, which must be a +// previously encoded string value. +func (w *Encoder) StringRef(idx Index) { w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) + w.Reloc(RelocString, idx) } // Strings encodes and writes a variable-length slice of strings into @@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) { func (w *Encoder) scalar(val constant.Value) { switch v := constant.Val(val).(type) { default: - errorf("unhandled %v (%v)", val, val.Kind()) + panicf("unhandled %v (%v)", val, val.Kind()) case bool: w.Code(ValBool) w.Bool(v) @@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) { b := v.Append(nil, 'p', -1) w.String(string(b)) // TODO: More efficient encoding. } + +// Version reports the version of the bitstream. +func (w *Encoder) Version() Version { return w.p.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go deleted file mode 100644 index 5294f6a63..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.7 -// +build !go1.7 - -// TODO(mdempsky): Remove after #44505 is resolved - -package pkgbits - -import "runtime" - -func walkFrames(pcs []uintptr, visit frameVisitor) { - for _, pc := range pcs { - fn := runtime.FuncForPC(pc) - file, line := fn.FileLine(pc) - - visit(file, line, fn.Name(), pc-fn.Entry()) - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go deleted file mode 100644 index 2324ae7ad..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package pkgbits - -import "runtime" - -// walkFrames calls visit for each call frame represented by pcs. -// -// pcs should be a slice of PCs, as returned by runtime.Callers. -func walkFrames(pcs []uintptr, visit frameVisitor) { - if len(pcs) == 0 { - return - } - - frames := runtime.CallersFrames(pcs) - for { - frame, more := frames.Next() - visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) - if !more { - return - } - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go index ad26d3b28..50534a295 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -12,6 +12,6 @@ func assert(b bool) { } } -func errorf(format string, args ...interface{}) { +func panicf(format string, args ...any) { panic(fmt.Errorf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go index 5bd51ef71..1520b73af 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -6,6 +6,7 @@ package pkgbits import ( "fmt" + "runtime" "strings" ) @@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string { type frameVisitor func(file string, line int, name string, offset uintptr) +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} + // SyncMarker is an enum type that represents markers that may be // written to export data to ensure the reader and writer stay // synchronized. 
@@ -110,4 +129,8 @@ const ( SyncStmtsEnd SyncLabel SyncOptLabel + + SyncMultiExpr + SyncRType + SyncConvRTTI ) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go index 4a5b0ca5f..582ad56d3 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -74,11 +74,14 @@ func _() { _ = x[SyncStmtsEnd-64] _ = x[SyncLabel-65] _ = x[SyncOptLabel-66] + _ = x[SyncMultiExpr-67] + _ = x[SyncRType-68] + _ = x[SyncConvRTTI-69] } -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI" -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480} func (i SyncMarker) String() string { i -= 1 diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go new file mode 100644 index 000000000..53af9df22 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// Version indicates a version of a unified IR bitstream. +// Each Version indicates the addition, removal, or change of +// new data in the bitstream. +// +// These are serialized to disk and the interpretation remains fixed. +type Version uint32 + +const ( + // V0: initial prototype. + // + // All data that is not assigned a Field is in version V0 + // and has not been deprecated. + V0 Version = iota + + // V1: adds the Flags uint32 word + V1 + + // V2: removes unused legacy fields and supports type parameters for aliases. 
+ // - remove the legacy "has init" bool from the public root + // - remove obj's "derived func instance" bool + // - add a TypeParamNames field to ObjAlias + // - remove derived info "needed" bool + V2 + + numVersions = iota +) + +// Field denotes a unit of data in the serialized unified IR bitstream. +// It is conceptually a like field in a structure. +// +// We only really need Fields when the data may or may not be present +// in a stream based on the Version of the bitstream. +// +// Unlike much of pkgbits, Fields are not serialized and +// can change values as needed. +type Field int + +const ( + // Flags in a uint32 in the header of a bitstream + // that is used to indicate whether optional features are enabled. + Flags Field = iota + + // Deprecated: HasInit was a bool indicating whether a package + // has any init functions. + HasInit + + // Deprecated: DerivedFuncInstance was a bool indicating + // whether an object was a function instance. + DerivedFuncInstance + + // ObjAlias has a list of TypeParamNames. + AliasTypeParamNames + + // Deprecated: DerivedInfoNeeded was a bool indicating + // whether a type was a derived type. + DerivedInfoNeeded + + numFields = iota +) + +// introduced is the version a field was added. +var introduced = [numFields]Version{ + Flags: V1, + AliasTypeParamNames: V2, +} + +// removed is the version a field was removed in or 0 for fields +// that have not yet been deprecated. +// (So removed[f]-1 is the last version it is included in.) +var removed = [numFields]Version{ + HasInit: V2, + DerivedFuncInstance: V2, + DerivedInfoNeeded: V2, +} + +// Has reports whether field f is present in a bitstream at version v. +func (v Version) Has(f Field) bool { + return introduced[f] <= v && (v < removed[f] || removed[f] == V0) +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index fd6892075..9f0b871ff 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -23,6 +23,7 @@ var PackageSymbols = map[string][]Symbol{ {"ErrWriteAfterClose", Var, 0}, {"ErrWriteTooLong", Var, 0}, {"FileInfoHeader", Func, 1}, + {"FileInfoNames", Type, 23}, {"Format", Type, 10}, {"FormatGNU", Const, 10}, {"FormatPAX", Const, 10}, @@ -267,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{ {"ErrTooLarge", Var, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, + {"FieldsFuncSeq", Func, 24}, + {"FieldsSeq", Func, 24}, {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -279,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, + {"Lines", Func, 24}, {"Map", Func, 0}, {"MinRead", Const, 0}, {"NewBuffer", Func, 0}, @@ -292,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, + {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, + {"SplitSeq", Func, 24}, {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -534,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{ {"NewCTR", Func, 0}, {"NewGCM", Func, 2}, {"NewGCMWithNonceSize", Func, 5}, + {"NewGCMWithRandomNonce", Func, 24}, {"NewGCMWithTagSize", Func, 11}, {"NewOFB", Func, 0}, {"Stream", Type, 0}, @@ -672,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{ {"Unmarshal", Func, 0}, {"UnmarshalCompressed", Func, 15}, }, + "crypto/fips140": { + {"Enabled", Func, 24}, + }, + "crypto/hkdf": { + {"Expand", 
Func, 24}, + {"Extract", Func, 24}, + {"Key", Func, 24}, + }, "crypto/hmac": { {"Equal", Func, 1}, {"New", Func, 0}, @@ -682,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{ {"Size", Const, 0}, {"Sum", Func, 2}, }, + "crypto/mlkem": { + {"(*DecapsulationKey1024).Bytes", Method, 24}, + {"(*DecapsulationKey1024).Decapsulate", Method, 24}, + {"(*DecapsulationKey1024).EncapsulationKey", Method, 24}, + {"(*DecapsulationKey768).Bytes", Method, 24}, + {"(*DecapsulationKey768).Decapsulate", Method, 24}, + {"(*DecapsulationKey768).EncapsulationKey", Method, 24}, + {"(*EncapsulationKey1024).Bytes", Method, 24}, + {"(*EncapsulationKey1024).Encapsulate", Method, 24}, + {"(*EncapsulationKey768).Bytes", Method, 24}, + {"(*EncapsulationKey768).Encapsulate", Method, 24}, + {"CiphertextSize1024", Const, 24}, + {"CiphertextSize768", Const, 24}, + {"DecapsulationKey1024", Type, 24}, + {"DecapsulationKey768", Type, 24}, + {"EncapsulationKey1024", Type, 24}, + {"EncapsulationKey768", Type, 24}, + {"EncapsulationKeySize1024", Const, 24}, + {"EncapsulationKeySize768", Const, 24}, + {"GenerateKey1024", Func, 24}, + {"GenerateKey768", Func, 24}, + {"NewDecapsulationKey1024", Func, 24}, + {"NewDecapsulationKey768", Func, 24}, + {"NewEncapsulationKey1024", Func, 24}, + {"NewEncapsulationKey768", Func, 24}, + {"SeedSize", Const, 24}, + {"SharedKeySize", Const, 24}, + }, + "crypto/pbkdf2": { + {"Key", Func, 24}, + }, "crypto/rand": { {"Int", Func, 0}, {"Prime", Func, 0}, {"Read", Func, 0}, {"Reader", Var, 0}, + {"Text", Func, 24}, }, "crypto/rc4": { {"(*Cipher).Reset", Method, 0}, @@ -765,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{ {"Sum224", Func, 2}, {"Sum256", Func, 2}, }, + "crypto/sha3": { + {"(*SHA3).AppendBinary", Method, 24}, + {"(*SHA3).BlockSize", Method, 24}, + {"(*SHA3).MarshalBinary", Method, 24}, + {"(*SHA3).Reset", Method, 24}, + {"(*SHA3).Size", Method, 24}, + {"(*SHA3).Sum", Method, 24}, + {"(*SHA3).UnmarshalBinary", Method, 24}, + {"(*SHA3).Write", Method, 24}, + {"(*SHAKE).AppendBinary", Method, 24}, + {"(*SHAKE).BlockSize", Method, 24}, + {"(*SHAKE).MarshalBinary", Method, 24}, + {"(*SHAKE).Read", Method, 24}, + {"(*SHAKE).Reset", Method, 24}, + {"(*SHAKE).UnmarshalBinary", Method, 24}, + {"(*SHAKE).Write", Method, 24}, + {"New224", Func, 24}, + {"New256", Func, 24}, + {"New384", Func, 24}, + {"New512", Func, 24}, + {"NewCSHAKE128", Func, 24}, + {"NewCSHAKE256", Func, 24}, + {"NewSHAKE128", Func, 24}, + {"NewSHAKE256", Func, 24}, + {"SHA3", Type, 24}, + {"SHAKE", Type, 24}, + {"Sum224", Func, 24}, + {"Sum256", Func, 24}, + {"Sum384", Func, 24}, + {"Sum512", Func, 24}, + {"SumSHAKE128", Func, 24}, + {"SumSHAKE256", Func, 24}, + }, "crypto/sha512": { {"BlockSize", Const, 0}, {"New", Func, 0}, @@ -787,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConstantTimeEq", Func, 0}, {"ConstantTimeLessOrEq", Func, 2}, {"ConstantTimeSelect", Func, 0}, + {"WithDataIndependentTiming", Func, 24}, {"XORBytes", Func, 20}, }, "crypto/tls": { @@ -820,6 +901,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*ConnectionState).ExportKeyingMaterial", Method, 11}, {"(*Dialer).Dial", Method, 15}, {"(*Dialer).DialContext", Method, 15}, + {"(*ECHRejectionError).Error", Method, 23}, {"(*QUICConn).Close", Method, 21}, {"(*QUICConn).ConnectionState", Method, 21}, {"(*QUICConn).HandleData", Method, 21}, @@ -827,6 +909,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*QUICConn).SendSessionTicket", Method, 21}, {"(*QUICConn).SetTransportParameters", Method, 21}, {"(*QUICConn).Start", Method, 21}, + 
{"(*QUICConn).StoreSession", Method, 23}, {"(*SessionState).Bytes", Method, 21}, {"(AlertError).Error", Method, 21}, {"(ClientAuthType).String", Method, 15}, @@ -861,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{ {"ClientHelloInfo", Type, 4}, {"ClientHelloInfo.CipherSuites", Field, 4}, {"ClientHelloInfo.Conn", Field, 8}, + {"ClientHelloInfo.Extensions", Field, 24}, {"ClientHelloInfo.ServerName", Field, 4}, {"ClientHelloInfo.SignatureSchemes", Field, 8}, {"ClientHelloInfo.SupportedCurves", Field, 4}, @@ -877,6 +961,9 @@ var PackageSymbols = map[string][]Symbol{ {"Config.ClientSessionCache", Field, 3}, {"Config.CurvePreferences", Field, 3}, {"Config.DynamicRecordSizingDisabled", Field, 7}, + {"Config.EncryptedClientHelloConfigList", Field, 23}, + {"Config.EncryptedClientHelloKeys", Field, 24}, + {"Config.EncryptedClientHelloRejectionVerify", Field, 23}, {"Config.GetCertificate", Field, 4}, {"Config.GetClientCertificate", Field, 8}, {"Config.GetConfigForClient", Field, 8}, @@ -902,6 +989,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConnectionState", Type, 0}, {"ConnectionState.CipherSuite", Field, 0}, {"ConnectionState.DidResume", Field, 1}, + {"ConnectionState.ECHAccepted", Field, 23}, {"ConnectionState.HandshakeComplete", Field, 0}, {"ConnectionState.NegotiatedProtocol", Field, 0}, {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0}, @@ -925,7 +1013,13 @@ var PackageSymbols = map[string][]Symbol{ {"ECDSAWithP384AndSHA384", Const, 8}, {"ECDSAWithP521AndSHA512", Const, 8}, {"ECDSAWithSHA1", Const, 10}, + {"ECHRejectionError", Type, 23}, + {"ECHRejectionError.RetryConfigList", Field, 23}, {"Ed25519", Const, 13}, + {"EncryptedClientHelloKey", Type, 24}, + {"EncryptedClientHelloKey.Config", Field, 24}, + {"EncryptedClientHelloKey.PrivateKey", Field, 24}, + {"EncryptedClientHelloKey.SendAsRetry", Field, 24}, {"InsecureCipherSuites", Func, 14}, {"Listen", Func, 0}, {"LoadX509KeyPair", Func, 0}, @@ -943,6 +1037,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParseSessionState", Func, 21}, {"QUICClient", Func, 21}, {"QUICConfig", Type, 21}, + {"QUICConfig.EnableSessionEvents", Field, 23}, {"QUICConfig.TLSConfig", Field, 21}, {"QUICConn", Type, 21}, {"QUICEncryptionLevel", Type, 21}, @@ -954,16 +1049,20 @@ var PackageSymbols = map[string][]Symbol{ {"QUICEvent.Data", Field, 21}, {"QUICEvent.Kind", Field, 21}, {"QUICEvent.Level", Field, 21}, + {"QUICEvent.SessionState", Field, 23}, {"QUICEvent.Suite", Field, 21}, {"QUICEventKind", Type, 21}, {"QUICHandshakeDone", Const, 21}, {"QUICNoEvent", Const, 21}, {"QUICRejectedEarlyData", Const, 21}, + {"QUICResumeSession", Const, 23}, {"QUICServer", Func, 21}, {"QUICSessionTicketOptions", Type, 21}, {"QUICSessionTicketOptions.EarlyData", Field, 21}, + {"QUICSessionTicketOptions.Extra", Field, 23}, {"QUICSetReadSecret", Const, 21}, {"QUICSetWriteSecret", Const, 21}, + {"QUICStoreSession", Const, 23}, {"QUICTransportParameters", Const, 21}, {"QUICTransportParametersRequired", Const, 21}, {"QUICWriteData", Const, 21}, @@ -1019,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{ {"VersionTLS12", Const, 2}, {"VersionTLS13", Const, 12}, {"X25519", Const, 8}, + {"X25519MLKEM768", Const, 24}, {"X509KeyPair", Func, 0}, }, "crypto/x509": { @@ -1036,13 +1136,19 @@ var PackageSymbols = map[string][]Symbol{ {"(*Certificate).Verify", Method, 0}, {"(*Certificate).VerifyHostname", Method, 0}, {"(*CertificateRequest).CheckSignature", Method, 5}, + {"(*OID).UnmarshalBinary", Method, 23}, + {"(*OID).UnmarshalText", Method, 23}, 
{"(*RevocationList).CheckSignatureFrom", Method, 19}, {"(CertificateInvalidError).Error", Method, 0}, {"(ConstraintViolationError).Error", Method, 0}, {"(HostnameError).Error", Method, 0}, {"(InsecureAlgorithmError).Error", Method, 6}, + {"(OID).AppendBinary", Method, 24}, + {"(OID).AppendText", Method, 24}, {"(OID).Equal", Method, 22}, {"(OID).EqualASN1OID", Method, 22}, + {"(OID).MarshalBinary", Method, 23}, + {"(OID).MarshalText", Method, 23}, {"(OID).String", Method, 22}, {"(PublicKeyAlgorithm).String", Method, 10}, {"(SignatureAlgorithm).String", Method, 6}, @@ -1067,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.Extensions", Field, 2}, {"Certificate.ExtraExtensions", Field, 2}, {"Certificate.IPAddresses", Field, 1}, + {"Certificate.InhibitAnyPolicy", Field, 24}, + {"Certificate.InhibitAnyPolicyZero", Field, 24}, + {"Certificate.InhibitPolicyMapping", Field, 24}, + {"Certificate.InhibitPolicyMappingZero", Field, 24}, {"Certificate.IsCA", Field, 0}, {"Certificate.Issuer", Field, 0}, {"Certificate.IssuingCertificateURL", Field, 2}, @@ -1083,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.PermittedURIDomains", Field, 10}, {"Certificate.Policies", Field, 22}, {"Certificate.PolicyIdentifiers", Field, 0}, + {"Certificate.PolicyMappings", Field, 24}, {"Certificate.PublicKey", Field, 0}, {"Certificate.PublicKeyAlgorithm", Field, 0}, {"Certificate.Raw", Field, 0}, @@ -1090,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.RawSubject", Field, 0}, {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, {"Certificate.RawTBSCertificate", Field, 0}, + {"Certificate.RequireExplicitPolicy", Field, 24}, + {"Certificate.RequireExplicitPolicyZero", Field, 24}, {"Certificate.SerialNumber", Field, 0}, {"Certificate.Signature", Field, 0}, {"Certificate.SignatureAlgorithm", Field, 0}, @@ -1181,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{ {"NameConstraintsWithoutSANs", Const, 10}, {"NameMismatch", Const, 8}, {"NewCertPool", Func, 0}, + {"NoValidChains", Const, 24}, {"NotAuthorizedToSign", Const, 0}, {"OID", Type, 22}, {"OIDFromInts", Func, 22}, @@ -1196,11 +1310,15 @@ var PackageSymbols = map[string][]Symbol{ {"ParseCertificates", Func, 0}, {"ParseDERCRL", Func, 0}, {"ParseECPrivateKey", Func, 1}, + {"ParseOID", Func, 23}, {"ParsePKCS1PrivateKey", Func, 0}, {"ParsePKCS1PublicKey", Func, 10}, {"ParsePKCS8PrivateKey", Func, 0}, {"ParsePKIXPublicKey", Func, 0}, {"ParseRevocationList", Func, 19}, + {"PolicyMapping", Type, 24}, + {"PolicyMapping.IssuerDomainPolicy", Field, 24}, + {"PolicyMapping.SubjectDomainPolicy", Field, 24}, {"PublicKeyAlgorithm", Type, 0}, {"PureEd25519", Const, 13}, {"RSA", Const, 0}, @@ -1247,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{ {"UnknownPublicKeyAlgorithm", Const, 0}, {"UnknownSignatureAlgorithm", Const, 0}, {"VerifyOptions", Type, 0}, + {"VerifyOptions.CertificatePolicies", Field, 24}, {"VerifyOptions.CurrentTime", Field, 0}, {"VerifyOptions.DNSName", Field, 0}, {"VerifyOptions.Intermediates", Field, 0}, @@ -1957,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).DynString", Method, 1}, {"(*File).DynValue", Method, 21}, {"(*File).DynamicSymbols", Method, 4}, + {"(*File).DynamicVersionNeeds", Method, 24}, + {"(*File).DynamicVersions", Method, 24}, {"(*File).ImportedLibraries", Method, 0}, {"(*File).ImportedSymbols", Method, 0}, {"(*File).Section", Method, 0}, @@ -2222,6 +2343,19 @@ var PackageSymbols = map[string][]Symbol{ {"DynFlag", Type, 0}, {"DynFlag1", Type, 21}, {"DynTag", Type, 0}, + 
{"DynamicVersion", Type, 24}, + {"DynamicVersion.Deps", Field, 24}, + {"DynamicVersion.Flags", Field, 24}, + {"DynamicVersion.Index", Field, 24}, + {"DynamicVersion.Name", Field, 24}, + {"DynamicVersionDep", Type, 24}, + {"DynamicVersionDep.Dep", Field, 24}, + {"DynamicVersionDep.Flags", Field, 24}, + {"DynamicVersionDep.Index", Field, 24}, + {"DynamicVersionFlag", Type, 24}, + {"DynamicVersionNeed", Type, 24}, + {"DynamicVersionNeed.Name", Field, 24}, + {"DynamicVersionNeed.Needs", Field, 24}, {"EI_ABIVERSION", Const, 0}, {"EI_CLASS", Const, 0}, {"EI_DATA", Const, 0}, @@ -2541,6 +2675,7 @@ var PackageSymbols = map[string][]Symbol{ {"PT_NOTE", Const, 0}, {"PT_NULL", Const, 0}, {"PT_OPENBSD_BOOTDATA", Const, 16}, + {"PT_OPENBSD_NOBTCFI", Const, 23}, {"PT_OPENBSD_RANDOMIZE", Const, 16}, {"PT_OPENBSD_WXNEEDED", Const, 16}, {"PT_PAX_FLAGS", Const, 16}, @@ -3620,13 +3755,16 @@ var PackageSymbols = map[string][]Symbol{ {"STT_COMMON", Const, 0}, {"STT_FILE", Const, 0}, {"STT_FUNC", Const, 0}, + {"STT_GNU_IFUNC", Const, 23}, {"STT_HIOS", Const, 0}, {"STT_HIPROC", Const, 0}, {"STT_LOOS", Const, 0}, {"STT_LOPROC", Const, 0}, {"STT_NOTYPE", Const, 0}, {"STT_OBJECT", Const, 0}, + {"STT_RELC", Const, 23}, {"STT_SECTION", Const, 0}, + {"STT_SRELC", Const, 23}, {"STT_TLS", Const, 0}, {"STV_DEFAULT", Const, 0}, {"STV_HIDDEN", Const, 0}, @@ -3704,8 +3842,19 @@ var PackageSymbols = map[string][]Symbol{ {"Symbol.Size", Field, 0}, {"Symbol.Value", Field, 0}, {"Symbol.Version", Field, 13}, + {"Symbol.VersionIndex", Field, 24}, + {"Symbol.VersionScope", Field, 24}, + {"SymbolVersionScope", Type, 24}, {"Type", Type, 0}, + {"VER_FLG_BASE", Const, 24}, + {"VER_FLG_INFO", Const, 24}, + {"VER_FLG_WEAK", Const, 24}, {"Version", Type, 0}, + {"VersionScopeGlobal", Const, 24}, + {"VersionScopeHidden", Const, 24}, + {"VersionScopeLocal", Const, 24}, + {"VersionScopeNone", Const, 24}, + {"VersionScopeSpecific", Const, 24}, }, "debug/gosym": { {"(*DecodingError).Error", Method, 0}, @@ -4431,8 +4580,10 @@ var PackageSymbols = map[string][]Symbol{ {"FS", Type, 16}, }, "encoding": { + {"BinaryAppender", Type, 24}, {"BinaryMarshaler", Type, 2}, {"BinaryUnmarshaler", Type, 2}, + {"TextAppender", Type, 24}, {"TextMarshaler", Type, 2}, {"TextUnmarshaler", Type, 2}, }, @@ -4544,11 +4695,14 @@ var PackageSymbols = map[string][]Symbol{ {"URLEncoding", Var, 0}, }, "encoding/binary": { + {"Append", Func, 23}, {"AppendByteOrder", Type, 19}, {"AppendUvarint", Func, 19}, {"AppendVarint", Func, 19}, {"BigEndian", Var, 0}, {"ByteOrder", Type, 0}, + {"Decode", Func, 23}, + {"Encode", Func, 23}, {"LittleEndian", Var, 0}, {"MaxVarintLen16", Const, 0}, {"MaxVarintLen32", Const, 0}, @@ -5308,6 +5462,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParenExpr.Rparen", Field, 0}, {"ParenExpr.X", Field, 0}, {"Pkg", Const, 0}, + {"Preorder", Func, 23}, {"Print", Func, 0}, {"RECV", Const, 0}, {"RangeStmt", Type, 0}, @@ -5898,7 +6053,12 @@ var PackageSymbols = map[string][]Symbol{ }, "go/types": { {"(*Alias).Obj", Method, 22}, + {"(*Alias).Origin", Method, 23}, + {"(*Alias).Rhs", Method, 23}, + {"(*Alias).SetTypeParams", Method, 23}, {"(*Alias).String", Method, 22}, + {"(*Alias).TypeArgs", Method, 23}, + {"(*Alias).TypeParams", Method, 23}, {"(*Alias).Underlying", Method, 22}, {"(*ArgumentError).Error", Method, 18}, {"(*ArgumentError).Unwrap", Method, 18}, @@ -5943,6 +6103,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Func).Pkg", Method, 5}, {"(*Func).Pos", Method, 5}, {"(*Func).Scope", Method, 5}, + {"(*Func).Signature", Method, 23}, 
{"(*Func).String", Method, 5}, {"(*Func).Type", Method, 5}, {"(*Info).ObjectOf", Method, 5}, @@ -5952,13 +6113,16 @@ var PackageSymbols = map[string][]Symbol{ {"(*Interface).Complete", Method, 5}, {"(*Interface).Embedded", Method, 5}, {"(*Interface).EmbeddedType", Method, 11}, + {"(*Interface).EmbeddedTypes", Method, 24}, {"(*Interface).Empty", Method, 5}, {"(*Interface).ExplicitMethod", Method, 5}, + {"(*Interface).ExplicitMethods", Method, 24}, {"(*Interface).IsComparable", Method, 18}, {"(*Interface).IsImplicit", Method, 18}, {"(*Interface).IsMethodSet", Method, 18}, {"(*Interface).MarkImplicit", Method, 18}, {"(*Interface).Method", Method, 5}, + {"(*Interface).Methods", Method, 24}, {"(*Interface).NumEmbeddeds", Method, 5}, {"(*Interface).NumExplicitMethods", Method, 5}, {"(*Interface).NumMethods", Method, 5}, @@ -5979,9 +6143,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*MethodSet).At", Method, 5}, {"(*MethodSet).Len", Method, 5}, {"(*MethodSet).Lookup", Method, 5}, + {"(*MethodSet).Methods", Method, 24}, {"(*MethodSet).String", Method, 5}, {"(*Named).AddMethod", Method, 5}, {"(*Named).Method", Method, 5}, + {"(*Named).Methods", Method, 24}, {"(*Named).NumMethods", Method, 5}, {"(*Named).Obj", Method, 5}, {"(*Named).Origin", Method, 18}, @@ -6022,6 +6188,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Pointer).String", Method, 5}, {"(*Pointer).Underlying", Method, 5}, {"(*Scope).Child", Method, 5}, + {"(*Scope).Children", Method, 24}, {"(*Scope).Contains", Method, 5}, {"(*Scope).End", Method, 5}, {"(*Scope).Innermost", Method, 5}, @@ -6057,6 +6224,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*StdSizes).Offsetsof", Method, 5}, {"(*StdSizes).Sizeof", Method, 5}, {"(*Struct).Field", Method, 5}, + {"(*Struct).Fields", Method, 24}, {"(*Struct).NumFields", Method, 5}, {"(*Struct).String", Method, 5}, {"(*Struct).Tag", Method, 5}, @@ -6068,8 +6236,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Tuple).Len", Method, 5}, {"(*Tuple).String", Method, 5}, {"(*Tuple).Underlying", Method, 5}, + {"(*Tuple).Variables", Method, 24}, {"(*TypeList).At", Method, 18}, {"(*TypeList).Len", Method, 18}, + {"(*TypeList).Types", Method, 24}, {"(*TypeName).Exported", Method, 5}, {"(*TypeName).Id", Method, 5}, {"(*TypeName).IsAlias", Method, 9}, @@ -6087,9 +6257,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*TypeParam).Underlying", Method, 18}, {"(*TypeParamList).At", Method, 18}, {"(*TypeParamList).Len", Method, 18}, + {"(*TypeParamList).TypeParams", Method, 24}, {"(*Union).Len", Method, 18}, {"(*Union).String", Method, 18}, {"(*Union).Term", Method, 18}, + {"(*Union).Terms", Method, 24}, {"(*Union).Underlying", Method, 18}, {"(*Var).Anonymous", Method, 5}, {"(*Var).Embedded", Method, 11}, @@ -6360,10 +6532,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*Hash).WriteByte", Method, 14}, {"(*Hash).WriteString", Method, 14}, {"Bytes", Func, 19}, + {"Comparable", Func, 24}, {"Hash", Type, 14}, {"MakeSeed", Func, 14}, {"Seed", Type, 14}, {"String", Func, 19}, + {"WriteComparable", Func, 24}, }, "html": { {"EscapeString", Func, 0}, @@ -6992,6 +7166,12 @@ var PackageSymbols = map[string][]Symbol{ {"TempFile", Func, 0}, {"WriteFile", Func, 0}, }, + "iter": { + {"Pull", Func, 23}, + {"Pull2", Func, 23}, + {"Seq", Type, 23}, + {"Seq2", Type, 23}, + }, "log": { {"(*Logger).Fatal", Method, 0}, {"(*Logger).Fatalf", Method, 0}, @@ -7044,6 +7224,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*JSONHandler).WithGroup", Method, 21}, {"(*Level).UnmarshalJSON", Method, 21}, {"(*Level).UnmarshalText", 
Method, 21}, + {"(*LevelVar).AppendText", Method, 24}, {"(*LevelVar).Level", Method, 21}, {"(*LevelVar).MarshalText", Method, 21}, {"(*LevelVar).Set", Method, 21}, @@ -7072,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Attr).Equal", Method, 21}, {"(Attr).String", Method, 21}, {"(Kind).String", Method, 21}, + {"(Level).AppendText", Method, 24}, {"(Level).Level", Method, 21}, {"(Level).MarshalJSON", Method, 21}, {"(Level).MarshalText", Method, 21}, @@ -7102,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{ {"Debug", Func, 21}, {"DebugContext", Func, 21}, {"Default", Func, 21}, + {"DiscardHandler", Var, 24}, {"Duration", Func, 21}, {"DurationValue", Func, 21}, {"Error", Func, 21}, @@ -7222,11 +7405,16 @@ var PackageSymbols = map[string][]Symbol{ {"Writer", Type, 0}, }, "maps": { + {"All", Func, 23}, {"Clone", Func, 21}, + {"Collect", Func, 23}, {"Copy", Func, 21}, {"DeleteFunc", Func, 21}, {"Equal", Func, 21}, {"EqualFunc", Func, 21}, + {"Insert", Func, 23}, + {"Keys", Func, 23}, + {"Values", Func, 23}, }, "math": { {"Abs", Func, 0}, @@ -7332,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Float).Acc", Method, 5}, {"(*Float).Add", Method, 5}, {"(*Float).Append", Method, 5}, + {"(*Float).AppendText", Method, 24}, {"(*Float).Cmp", Method, 5}, {"(*Float).Copy", Method, 5}, {"(*Float).Float32", Method, 5}, @@ -7378,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).And", Method, 0}, {"(*Int).AndNot", Method, 0}, {"(*Int).Append", Method, 6}, + {"(*Int).AppendText", Method, 24}, {"(*Int).Binomial", Method, 0}, {"(*Int).Bit", Method, 0}, {"(*Int).BitLen", Method, 0}, @@ -7434,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).Xor", Method, 0}, {"(*Rat).Abs", Method, 0}, {"(*Rat).Add", Method, 0}, + {"(*Rat).AppendText", Method, 24}, {"(*Rat).Cmp", Method, 0}, {"(*Rat).Denom", Method, 0}, {"(*Rat).Float32", Method, 4}, @@ -7616,10 +7807,13 @@ var PackageSymbols = map[string][]Symbol{ {"Zipf", Type, 0}, }, "math/rand/v2": { + {"(*ChaCha8).AppendBinary", Method, 24}, {"(*ChaCha8).MarshalBinary", Method, 22}, + {"(*ChaCha8).Read", Method, 23}, {"(*ChaCha8).Seed", Method, 22}, {"(*ChaCha8).Uint64", Method, 22}, {"(*ChaCha8).UnmarshalBinary", Method, 22}, + {"(*PCG).AppendBinary", Method, 24}, {"(*PCG).MarshalBinary", Method, 22}, {"(*PCG).Seed", Method, 22}, {"(*PCG).Uint64", Method, 22}, @@ -7636,6 +7830,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Rand).NormFloat64", Method, 22}, {"(*Rand).Perm", Method, 22}, {"(*Rand).Shuffle", Method, 22}, + {"(*Rand).Uint", Method, 23}, {"(*Rand).Uint32", Method, 22}, {"(*Rand).Uint32N", Method, 22}, {"(*Rand).Uint64", Method, 22}, @@ -7663,6 +7858,7 @@ var PackageSymbols = map[string][]Symbol{ {"Rand", Type, 22}, {"Shuffle", Func, 22}, {"Source", Type, 22}, + {"Uint", Func, 23}, {"Uint32", Func, 22}, {"Uint32N", Func, 22}, {"Uint64", Func, 22}, @@ -7743,6 +7939,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*DNSError).Error", Method, 0}, {"(*DNSError).Temporary", Method, 0}, {"(*DNSError).Timeout", Method, 0}, + {"(*DNSError).Unwrap", Method, 23}, {"(*Dialer).Dial", Method, 1}, {"(*Dialer).DialContext", Method, 7}, {"(*Dialer).MultipathTCP", Method, 21}, @@ -7809,6 +8006,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*TCPConn).RemoteAddr", Method, 0}, {"(*TCPConn).SetDeadline", Method, 0}, {"(*TCPConn).SetKeepAlive", Method, 0}, + {"(*TCPConn).SetKeepAliveConfig", Method, 23}, {"(*TCPConn).SetKeepAlivePeriod", Method, 2}, {"(*TCPConn).SetLinger", Method, 0}, {"(*TCPConn).SetNoDelay", Method, 0}, @@ 
-7883,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnixListener).SyscallConn", Method, 10}, {"(Flags).String", Method, 0}, {"(HardwareAddr).String", Method, 0}, + {"(IP).AppendText", Method, 24}, {"(IP).DefaultMask", Method, 0}, {"(IP).Equal", Method, 0}, {"(IP).IsGlobalUnicast", Method, 0}, @@ -7922,6 +8121,7 @@ var PackageSymbols = map[string][]Symbol{ {"DNSError.IsTimeout", Field, 0}, {"DNSError.Name", Field, 0}, {"DNSError.Server", Field, 0}, + {"DNSError.UnwrapErr", Field, 23}, {"DefaultResolver", Var, 8}, {"Dial", Func, 0}, {"DialIP", Func, 0}, @@ -7937,6 +8137,7 @@ var PackageSymbols = map[string][]Symbol{ {"Dialer.DualStack", Field, 2}, {"Dialer.FallbackDelay", Field, 5}, {"Dialer.KeepAlive", Field, 3}, + {"Dialer.KeepAliveConfig", Field, 23}, {"Dialer.LocalAddr", Field, 1}, {"Dialer.Resolver", Field, 8}, {"Dialer.Timeout", Field, 1}, @@ -7989,10 +8190,16 @@ var PackageSymbols = map[string][]Symbol{ {"Interfaces", Func, 0}, {"InvalidAddrError", Type, 0}, {"JoinHostPort", Func, 0}, + {"KeepAliveConfig", Type, 23}, + {"KeepAliveConfig.Count", Field, 23}, + {"KeepAliveConfig.Enable", Field, 23}, + {"KeepAliveConfig.Idle", Field, 23}, + {"KeepAliveConfig.Interval", Field, 23}, {"Listen", Func, 0}, {"ListenConfig", Type, 11}, {"ListenConfig.Control", Field, 11}, {"ListenConfig.KeepAlive", Field, 13}, + {"ListenConfig.KeepAliveConfig", Field, 23}, {"ListenIP", Func, 0}, {"ListenMulticastUDP", Func, 0}, {"ListenPacket", Func, 0}, @@ -8075,12 +8282,16 @@ var PackageSymbols = map[string][]Symbol{ {"(*MaxBytesError).Error", Method, 19}, {"(*ProtocolError).Error", Method, 0}, {"(*ProtocolError).Is", Method, 21}, + {"(*Protocols).SetHTTP1", Method, 24}, + {"(*Protocols).SetHTTP2", Method, 24}, + {"(*Protocols).SetUnencryptedHTTP2", Method, 24}, {"(*Request).AddCookie", Method, 0}, {"(*Request).BasicAuth", Method, 4}, {"(*Request).Clone", Method, 13}, {"(*Request).Context", Method, 7}, {"(*Request).Cookie", Method, 0}, {"(*Request).Cookies", Method, 0}, + {"(*Request).CookiesNamed", Method, 23}, {"(*Request).FormFile", Method, 0}, {"(*Request).FormValue", Method, 0}, {"(*Request).MultipartReader", Method, 0}, @@ -8133,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Header).Values", Method, 14}, {"(Header).Write", Method, 0}, {"(Header).WriteSubset", Method, 0}, + {"(Protocols).HTTP1", Method, 24}, + {"(Protocols).HTTP2", Method, 24}, + {"(Protocols).String", Method, 24}, + {"(Protocols).UnencryptedHTTP2", Method, 24}, {"AllowQuerySemicolons", Func, 17}, {"CanonicalHeaderKey", Func, 0}, {"Client", Type, 0}, @@ -8148,7 +8363,9 @@ var PackageSymbols = map[string][]Symbol{ {"Cookie.HttpOnly", Field, 0}, {"Cookie.MaxAge", Field, 0}, {"Cookie.Name", Field, 0}, + {"Cookie.Partitioned", Field, 23}, {"Cookie.Path", Field, 0}, + {"Cookie.Quoted", Field, 23}, {"Cookie.Raw", Field, 0}, {"Cookie.RawExpires", Field, 0}, {"Cookie.SameSite", Field, 11}, @@ -8193,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{ {"FileSystem", Type, 0}, {"Flusher", Type, 0}, {"Get", Func, 0}, + {"HTTP2Config", Type, 24}, + {"HTTP2Config.CountError", Field, 24}, + {"HTTP2Config.MaxConcurrentStreams", Field, 24}, + {"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24}, + {"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24}, + {"HTTP2Config.MaxReadFrameSize", Field, 24}, + {"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24}, + {"HTTP2Config.MaxReceiveBufferPerStream", Field, 24}, + {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24}, + {"HTTP2Config.PingTimeout", Field, 24}, + 
{"HTTP2Config.SendPingTimeout", Field, 24}, + {"HTTP2Config.WriteByteTimeout", Field, 24}, {"Handle", Func, 0}, {"HandleFunc", Func, 0}, {"Handler", Type, 0}, @@ -8225,12 +8454,15 @@ var PackageSymbols = map[string][]Symbol{ {"NoBody", Var, 8}, {"NotFound", Func, 0}, {"NotFoundHandler", Func, 0}, + {"ParseCookie", Func, 23}, {"ParseHTTPVersion", Func, 0}, + {"ParseSetCookie", Func, 23}, {"ParseTime", Func, 1}, {"Post", Func, 0}, {"PostForm", Func, 0}, {"ProtocolError", Type, 0}, {"ProtocolError.ErrorString", Field, 0}, + {"Protocols", Type, 24}, {"ProxyFromEnvironment", Func, 0}, {"ProxyURL", Func, 0}, {"PushOptions", Type, 8}, @@ -8252,6 +8484,7 @@ var PackageSymbols = map[string][]Symbol{ {"Request.Host", Field, 0}, {"Request.Method", Field, 0}, {"Request.MultipartForm", Field, 0}, + {"Request.Pattern", Field, 23}, {"Request.PostForm", Field, 1}, {"Request.Proto", Field, 0}, {"Request.ProtoMajor", Field, 0}, @@ -8299,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{ {"Server.ConnState", Field, 3}, {"Server.DisableGeneralOptionsHandler", Field, 20}, {"Server.ErrorLog", Field, 3}, + {"Server.HTTP2", Field, 24}, {"Server.Handler", Field, 0}, {"Server.IdleTimeout", Field, 8}, {"Server.MaxHeaderBytes", Field, 0}, + {"Server.Protocols", Field, 24}, {"Server.ReadHeaderTimeout", Field, 8}, {"Server.ReadTimeout", Field, 0}, {"Server.TLSConfig", Field, 0}, @@ -8391,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{ {"Transport.ExpectContinueTimeout", Field, 6}, {"Transport.ForceAttemptHTTP2", Field, 13}, {"Transport.GetProxyConnectHeader", Field, 16}, + {"Transport.HTTP2", Field, 24}, {"Transport.IdleConnTimeout", Field, 7}, {"Transport.MaxConnsPerHost", Field, 11}, {"Transport.MaxIdleConns", Field, 7}, {"Transport.MaxIdleConnsPerHost", Field, 0}, {"Transport.MaxResponseHeaderBytes", Field, 7}, {"Transport.OnProxyConnectResponse", Field, 20}, + {"Transport.Protocols", Field, 24}, {"Transport.Proxy", Field, 0}, {"Transport.ProxyConnectHeader", Field, 8}, {"Transport.ReadBufferSize", Field, 13}, @@ -8453,6 +8690,7 @@ var PackageSymbols = map[string][]Symbol{ {"DefaultRemoteAddr", Const, 0}, {"NewRecorder", Func, 0}, {"NewRequest", Func, 7}, + {"NewRequestWithContext", Func, 23}, {"NewServer", Func, 0}, {"NewTLSServer", Func, 0}, {"NewUnstartedServer", Func, 0}, @@ -8583,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*AddrPort).UnmarshalText", Method, 18}, {"(*Prefix).UnmarshalBinary", Method, 18}, {"(*Prefix).UnmarshalText", Method, 18}, + {"(Addr).AppendBinary", Method, 24}, + {"(Addr).AppendText", Method, 24}, {"(Addr).AppendTo", Method, 18}, {"(Addr).As16", Method, 18}, {"(Addr).As4", Method, 18}, @@ -8613,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Addr).WithZone", Method, 18}, {"(Addr).Zone", Method, 18}, {"(AddrPort).Addr", Method, 18}, + {"(AddrPort).AppendBinary", Method, 24}, + {"(AddrPort).AppendText", Method, 24}, {"(AddrPort).AppendTo", Method, 18}, {"(AddrPort).Compare", Method, 22}, {"(AddrPort).IsValid", Method, 18}, @@ -8621,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{ {"(AddrPort).Port", Method, 18}, {"(AddrPort).String", Method, 18}, {"(Prefix).Addr", Method, 18}, + {"(Prefix).AppendBinary", Method, 24}, + {"(Prefix).AppendText", Method, 24}, {"(Prefix).AppendTo", Method, 18}, {"(Prefix).Bits", Method, 18}, {"(Prefix).Contains", Method, 18}, @@ -8805,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Error).Temporary", Method, 6}, {"(*Error).Timeout", Method, 6}, {"(*Error).Unwrap", Method, 13}, + {"(*URL).AppendBinary", 
Method, 24}, {"(*URL).EscapedFragment", Method, 15}, {"(*URL).EscapedPath", Method, 5}, {"(*URL).Hostname", Method, 8}, @@ -8904,6 +9149,17 @@ var PackageSymbols = map[string][]Symbol{ {"(*ProcessState).SysUsage", Method, 0}, {"(*ProcessState).SystemTime", Method, 0}, {"(*ProcessState).UserTime", Method, 0}, + {"(*Root).Close", Method, 24}, + {"(*Root).Create", Method, 24}, + {"(*Root).FS", Method, 24}, + {"(*Root).Lstat", Method, 24}, + {"(*Root).Mkdir", Method, 24}, + {"(*Root).Name", Method, 24}, + {"(*Root).Open", Method, 24}, + {"(*Root).OpenFile", Method, 24}, + {"(*Root).OpenRoot", Method, 24}, + {"(*Root).Remove", Method, 24}, + {"(*Root).Stat", Method, 24}, {"(*SyscallError).Error", Method, 0}, {"(*SyscallError).Timeout", Method, 10}, {"(*SyscallError).Unwrap", Method, 13}, @@ -8917,6 +9173,7 @@ var PackageSymbols = map[string][]Symbol{ {"Chown", Func, 0}, {"Chtimes", Func, 0}, {"Clearenv", Func, 0}, + {"CopyFS", Func, 23}, {"Create", Func, 0}, {"CreateTemp", Func, 16}, {"DevNull", Const, 0}, @@ -8996,6 +9253,8 @@ var PackageSymbols = map[string][]Symbol{ {"O_WRONLY", Const, 0}, {"Open", Func, 0}, {"OpenFile", Func, 0}, + {"OpenInRoot", Func, 24}, + {"OpenRoot", Func, 24}, {"PathError", Type, 0}, {"PathError.Err", Field, 0}, {"PathError.Op", Field, 0}, @@ -9017,6 +9276,7 @@ var PackageSymbols = map[string][]Symbol{ {"Remove", Func, 0}, {"RemoveAll", Func, 0}, {"Rename", Func, 0}, + {"Root", Type, 24}, {"SEEK_CUR", Const, 0}, {"SEEK_END", Const, 0}, {"SEEK_SET", Const, 0}, @@ -9150,6 +9410,7 @@ var PackageSymbols = map[string][]Symbol{ {"IsLocal", Func, 20}, {"Join", Func, 0}, {"ListSeparator", Const, 0}, + {"Localize", Func, 23}, {"Match", Func, 0}, {"Rel", Func, 0}, {"Separator", Const, 0}, @@ -9232,6 +9493,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Value).Pointer", Method, 0}, {"(Value).Recv", Method, 0}, {"(Value).Send", Method, 0}, + {"(Value).Seq", Method, 23}, + {"(Value).Seq2", Method, 23}, {"(Value).Set", Method, 0}, {"(Value).SetBool", Method, 0}, {"(Value).SetBytes", Method, 0}, @@ -9314,6 +9577,7 @@ var PackageSymbols = map[string][]Symbol{ {"SelectSend", Const, 1}, {"SendDir", Const, 0}, {"Slice", Const, 0}, + {"SliceAt", Func, 23}, {"SliceHeader", Type, 0}, {"SliceHeader.Cap", Field, 0}, {"SliceHeader.Data", Field, 0}, @@ -9354,6 +9618,7 @@ var PackageSymbols = map[string][]Symbol{ {"Zero", Func, 0}, }, "regexp": { + {"(*Regexp).AppendText", Method, 24}, {"(*Regexp).Copy", Method, 6}, {"(*Regexp).Expand", Method, 0}, {"(*Regexp).ExpandString", Method, 0}, @@ -9534,6 +9799,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*StackRecord).Stack", Method, 0}, {"(*TypeAssertionError).Error", Method, 0}, {"(*TypeAssertionError).RuntimeError", Method, 0}, + {"(Cleanup).Stop", Method, 24}, + {"AddCleanup", Func, 24}, {"BlockProfile", Func, 1}, {"BlockProfileRecord", Type, 1}, {"BlockProfileRecord.Count", Field, 1}, @@ -9544,6 +9811,7 @@ var PackageSymbols = map[string][]Symbol{ {"Caller", Func, 0}, {"Callers", Func, 0}, {"CallersFrames", Func, 7}, + {"Cleanup", Type, 24}, {"Compiler", Const, 0}, {"Error", Type, 0}, {"Frame", Type, 7}, @@ -9655,6 +9923,7 @@ var PackageSymbols = map[string][]Symbol{ {"BuildSetting", Type, 18}, {"BuildSetting.Key", Field, 18}, {"BuildSetting.Value", Field, 18}, + {"CrashOptions", Type, 23}, {"FreeOSMemory", Func, 1}, {"GCStats", Type, 1}, {"GCStats.LastGC", Field, 1}, @@ -9672,6 +9941,7 @@ var PackageSymbols = map[string][]Symbol{ {"PrintStack", Func, 0}, {"ReadBuildInfo", Func, 12}, {"ReadGCStats", Func, 1}, + {"SetCrashOutput", Func, 
23}, {"SetGCPercent", Func, 1}, {"SetMaxStack", Func, 2}, {"SetMaxThreads", Func, 2}, @@ -9742,10 +10012,15 @@ var PackageSymbols = map[string][]Symbol{ {"WithRegion", Func, 11}, }, "slices": { + {"All", Func, 23}, + {"AppendSeq", Func, 23}, + {"Backward", Func, 23}, {"BinarySearch", Func, 21}, {"BinarySearchFunc", Func, 21}, + {"Chunk", Func, 23}, {"Clip", Func, 21}, {"Clone", Func, 21}, + {"Collect", Func, 23}, {"Compact", Func, 21}, {"CompactFunc", Func, 21}, {"Compare", Func, 21}, @@ -9767,11 +10042,16 @@ var PackageSymbols = map[string][]Symbol{ {"MaxFunc", Func, 21}, {"Min", Func, 21}, {"MinFunc", Func, 21}, + {"Repeat", Func, 23}, {"Replace", Func, 21}, {"Reverse", Func, 21}, {"Sort", Func, 21}, {"SortFunc", Func, 21}, {"SortStableFunc", Func, 21}, + {"Sorted", Func, 23}, + {"SortedFunc", Func, 23}, + {"SortedStableFunc", Func, 23}, + {"Values", Func, 23}, }, "sort": { {"(Float64Slice).Len", Method, 0}, @@ -9894,6 +10174,8 @@ var PackageSymbols = map[string][]Symbol{ {"EqualFold", Func, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, + {"FieldsFuncSeq", Func, 24}, + {"FieldsSeq", Func, 24}, {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -9906,6 +10188,7 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, + {"Lines", Func, 24}, {"Map", Func, 0}, {"NewReader", Func, 0}, {"NewReplacer", Func, 0}, @@ -9917,7 +10200,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, + {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, + {"SplitSeq", Func, 24}, {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -9936,10 +10221,14 @@ var PackageSymbols = map[string][]Symbol{ {"TrimSpace", Func, 0}, {"TrimSuffix", Func, 1}, }, + "structs": { + {"HostLayout", Type, 23}, + }, "sync": { {"(*Cond).Broadcast", Method, 0}, {"(*Cond).Signal", Method, 0}, {"(*Cond).Wait", Method, 0}, + {"(*Map).Clear", Method, 23}, {"(*Map).CompareAndDelete", Method, 20}, {"(*Map).CompareAndSwap", Method, 20}, {"(*Map).Delete", Method, 9}, @@ -9986,13 +10275,17 @@ var PackageSymbols = map[string][]Symbol{ {"(*Bool).Store", Method, 19}, {"(*Bool).Swap", Method, 19}, {"(*Int32).Add", Method, 19}, + {"(*Int32).And", Method, 23}, {"(*Int32).CompareAndSwap", Method, 19}, {"(*Int32).Load", Method, 19}, + {"(*Int32).Or", Method, 23}, {"(*Int32).Store", Method, 19}, {"(*Int32).Swap", Method, 19}, {"(*Int64).Add", Method, 19}, + {"(*Int64).And", Method, 23}, {"(*Int64).CompareAndSwap", Method, 19}, {"(*Int64).Load", Method, 19}, + {"(*Int64).Or", Method, 23}, {"(*Int64).Store", Method, 19}, {"(*Int64).Swap", Method, 19}, {"(*Pointer).CompareAndSwap", Method, 19}, @@ -10000,18 +10293,24 @@ var PackageSymbols = map[string][]Symbol{ {"(*Pointer).Store", Method, 19}, {"(*Pointer).Swap", Method, 19}, {"(*Uint32).Add", Method, 19}, + {"(*Uint32).And", Method, 23}, {"(*Uint32).CompareAndSwap", Method, 19}, {"(*Uint32).Load", Method, 19}, + {"(*Uint32).Or", Method, 23}, {"(*Uint32).Store", Method, 19}, {"(*Uint32).Swap", Method, 19}, {"(*Uint64).Add", Method, 19}, + {"(*Uint64).And", Method, 23}, {"(*Uint64).CompareAndSwap", Method, 19}, {"(*Uint64).Load", Method, 19}, + {"(*Uint64).Or", Method, 23}, {"(*Uint64).Store", Method, 19}, {"(*Uint64).Swap", Method, 19}, {"(*Uintptr).Add", Method, 19}, + {"(*Uintptr).And", Method, 23}, {"(*Uintptr).CompareAndSwap", Method, 19}, {"(*Uintptr).Load", Method, 19}, + {"(*Uintptr).Or", Method, 23}, {"(*Uintptr).Store", 
Method, 19}, {"(*Uintptr).Swap", Method, 19}, {"(*Value).CompareAndSwap", Method, 17}, @@ -10023,6 +10322,11 @@ var PackageSymbols = map[string][]Symbol{ {"AddUint32", Func, 0}, {"AddUint64", Func, 0}, {"AddUintptr", Func, 0}, + {"AndInt32", Func, 23}, + {"AndInt64", Func, 23}, + {"AndUint32", Func, 23}, + {"AndUint64", Func, 23}, + {"AndUintptr", Func, 23}, {"Bool", Type, 19}, {"CompareAndSwapInt32", Func, 0}, {"CompareAndSwapInt64", Func, 0}, @@ -10038,6 +10342,11 @@ var PackageSymbols = map[string][]Symbol{ {"LoadUint32", Func, 0}, {"LoadUint64", Func, 0}, {"LoadUintptr", Func, 0}, + {"OrInt32", Func, 23}, + {"OrInt64", Func, 23}, + {"OrUint32", Func, 23}, + {"OrUint64", Func, 23}, + {"OrUintptr", Func, 23}, {"Pointer", Type, 19}, {"StoreInt32", Func, 0}, {"StoreInt64", Func, 0}, @@ -16200,6 +16509,7 @@ var PackageSymbols = map[string][]Symbol{ {"WSAEACCES", Const, 2}, {"WSAECONNABORTED", Const, 9}, {"WSAECONNRESET", Const, 3}, + {"WSAENOPROTOOPT", Const, 23}, {"WSAEnumProtocols", Func, 2}, {"WSAID_CONNECTEX", Var, 1}, {"WSAIoctl", Func, 0}, @@ -16308,7 +16618,9 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0}, }, "testing": { + {"(*B).Chdir", Method, 24}, {"(*B).Cleanup", Method, 14}, + {"(*B).Context", Method, 24}, {"(*B).Elapsed", Method, 20}, {"(*B).Error", Method, 0}, {"(*B).Errorf", Method, 0}, @@ -16320,6 +16632,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).Helper", Method, 9}, {"(*B).Log", Method, 0}, {"(*B).Logf", Method, 0}, + {"(*B).Loop", Method, 24}, {"(*B).Name", Method, 8}, {"(*B).ReportAllocs", Method, 1}, {"(*B).ReportMetric", Method, 13}, @@ -16337,7 +16650,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0}, {"(*B).TempDir", Method, 15}, {"(*F).Add", Method, 18}, + {"(*F).Chdir", Method, 24}, {"(*F).Cleanup", Method, 18}, + {"(*F).Context", Method, 24}, {"(*F).Error", Method, 18}, {"(*F).Errorf", Method, 18}, {"(*F).Fail", Method, 18}, @@ -16358,7 +16673,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18}, {"(*M).Run", Method, 4}, {"(*PB).Next", Method, 3}, + {"(*T).Chdir", Method, 24}, {"(*T).Cleanup", Method, 14}, + {"(*T).Context", Method, 24}, {"(*T).Deadline", Method, 15}, {"(*T).Error", Method, 0}, {"(*T).Errorf", Method, 0}, @@ -16849,7 +17166,9 @@ var PackageSymbols = map[string][]Symbol{ {"(Time).Add", Method, 0}, {"(Time).AddDate", Method, 0}, {"(Time).After", Method, 0}, + {"(Time).AppendBinary", Method, 24}, {"(Time).AppendFormat", Method, 5}, + {"(Time).AppendText", Method, 24}, {"(Time).Before", Method, 0}, {"(Time).Clock", Method, 0}, {"(Time).Compare", Method, 20}, @@ -17284,6 +17603,7 @@ var PackageSymbols = map[string][]Symbol{ {"Encode", Func, 0}, {"EncodeRune", Func, 0}, {"IsSurrogate", Func, 0}, + {"RuneLen", Func, 23}, }, "unicode/utf8": { {"AppendRune", Func, 18}, @@ -17306,6 +17626,11 @@ var PackageSymbols = map[string][]Symbol{ {"ValidRune", Func, 1}, {"ValidString", Func, 0}, }, + "unique": { + {"(Handle).Value", Method, 23}, + {"Handle", Type, 23}, + {"Make", Func, 23}, + }, "unsafe": { {"Add", Func, 0}, {"Alignof", Func, 0}, @@ -17317,4 +17642,9 @@ var PackageSymbols = map[string][]Symbol{ {"String", Func, 0}, {"StringData", Func, 0}, }, + "weak": { + {"(Pointer).Value", Method, 24}, + {"Make", Func, 24}, + {"Pointer", Type, 24}, + }, } diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go deleted file mode 100644 index ff9437a36..000000000 --- 
a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// package tokeninternal provides access to some internal features of the token -// package. -package tokeninternal - -import ( - "fmt" - "go/token" - "sort" - "sync" - "unsafe" -) - -// GetLines returns the table of line-start offsets from a token.File. -func GetLines(file *token.File) []int { - // token.File has a Lines method on Go 1.21 and later. - if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { - return file.Lines() - } - - // This declaration must match that of token.File. - // This creates a risk of dependency skew. - // For now we check that the size of the two - // declarations is the same, on the (fragile) assumption - // that future changes would add fields. - type tokenFile119 struct { - _ string - _ int - _ int - mu sync.Mutex // we're not complete monsters - lines []int - _ []struct{} - } - - if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { - panic("unexpected token.File size") - } - var ptr *tokenFile119 - type uP = unsafe.Pointer - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines -} - -// AddExistingFiles adds the specified files to the FileSet if they -// are not already present. It panics if any pair of files in the -// resulting FileSet would overlap. -func AddExistingFiles(fset *token.FileSet, files []*token.File) { - // Punch through the FileSet encapsulation. - type tokenFileSet struct { - // This type remained essentially consistent from go1.16 to go1.21. - mutex sync.RWMutex - base int - files []*token.File - _ *token.File // changed to atomic.Pointer[token.File] in go1.19 - } - - // If the size of token.FileSet changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) - var _ [-delta * delta]int - - type uP = unsafe.Pointer - var ptr *tokenFileSet - *(*uP)(uP(&ptr)) = uP(fset) - ptr.mutex.Lock() - defer ptr.mutex.Unlock() - - // Merge and sort. - newFiles := append(ptr.files, files...) - sort.Slice(newFiles, func(i, j int) bool { - return newFiles[i].Base() < newFiles[j].Base() - }) - - // Reject overlapping files. - // Discard adjacent identical files. - out := newFiles[:0] - for i, file := range newFiles { - if i > 0 { - prev := newFiles[i-1] - if file == prev { - continue - } - if prev.Base()+prev.Size()+1 > file.Base() { - panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) - } - } - out = append(out, file) - } - newFiles = out - - ptr.files = newFiles - - // Advance FileSet.Base(). - if len(newFiles) > 0 { - last := newFiles[len(newFiles)-1] - newBase := last.Base() + last.Size() + 1 - if ptr.base < newBase { - ptr.base = newBase - } - } -} - -// FileSetFor returns a new FileSet containing a sequence of new Files with -// the same base, size, and line as the input files, for use in APIs that -// require a FileSet. -// -// Precondition: the input files must be non-overlapping, and sorted in order -// of their Base. 
-func FileSetFor(files ...*token.File) *token.FileSet { - fset := token.NewFileSet() - for _, f := range files { - f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) - lines := GetLines(f) - f2.SetLines(lines) - } - return fset -} - -// CloneFileSet creates a new FileSet holding all files in fset. It does not -// create copies of the token.Files in fset: they are added to the resulting -// FileSet unmodified. -func CloneFileSet(fset *token.FileSet) *token.FileSet { - var files []*token.File - fset.Iterate(func(f *token.File) bool { - files = append(files, f) - return true - }) - newFileSet := token.NewFileSet() - AddExistingFiles(newFileSet, files) - return newFileSet -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 000000000..cdae2b8e8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,68 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that +// interact with generic Go code, as introduced with Go 1.18. It +// supplements the standard library APIs. Notably, the StructuralTerms +// API computes a minimal representation of the structural +// restrictions on a type parameter. +// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil. +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *ast.IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &ast.IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter (or an alias of one). +func IsTypeParam(t types.Type) bool { + _, ok := types.Unalias(t).(*types.TypeParam) + return ok +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 000000000..6e83c6fb1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,150 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
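// A minimal illustrative sketch of the UnpackIndexExpr/PackIndexExpr helpers
// added above in typeparams/common.go, assuming a caller located where this
// internal package is importable. The parsed expression and variable names
// are assumptions for the example only.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"

	"golang.org/x/tools/internal/typeparams"
)

func main() {
	// On Go 1.18+, "m[K, V]" parses to an *ast.IndexListExpr.
	expr, err := parser.ParseExpr("m[K, V]")
	if err != nil {
		panic(err)
	}

	// Unpack the base expression and its index expressions.
	x, lbrack, indices, rbrack := typeparams.UnpackIndexExpr(expr)
	fmt.Printf("base=%s indices=%d\n", x.(*ast.Ident).Name, len(indices)) // base=m indices=2

	// Repacking the same pieces yields an equivalent AST node.
	repacked := typeparams.PackIndexExpr(x, lbrack, indices, rbrack)
	fmt.Printf("%T\n", repacked) // *ast.IndexListExpr
}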
+ +package typeparams + +import ( + "fmt" + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. 
It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, NormalTerms returns ErrEmptyTypeSet. +// +// NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func NormalTerms(typ types.Type) ([]*types.Term, error) { + switch typ := typ.Underlying().(type) { + case *types.TypeParam: + return StructuralTerms(typ) + case *types.Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*types.Term{types.NewTerm(false, typ)}, nil + } +} + +// Deref returns the type of the variable pointed to by t, +// if t's core type is a pointer; otherwise it returns t. +// +// Do not assume that Deref(T)==T implies T is not a pointer: +// consider "type T *T", for example. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func Deref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} + +// MustDeref returns the type of the variable pointed to by t. +// It panics if t's core type is not a pointer. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func MustDeref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + panic(fmt.Sprintf("%v is not a pointer", t)) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go new file mode 100644 index 000000000..0ade5c294 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -0,0 +1,131 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// Free is a memoization of the set of free type parameters within a +// type. It makes a sequence of calls to [Free.Has] for overlapping +// types more efficient. The zero value is ready for use. +// +// NOTE: Adapted from go/types/infer.go. If it is later exported, factor. +type Free struct { + seen map[types.Type]bool +} + +// Has reports whether the specified type has a free type parameter. +func (w *Free) Has(typ types.Type) (res bool) { + // detect cycles + if x, ok := w.seen[typ]; ok { + return x + } + if w.seen == nil { + w.seen = make(map[types.Type]bool) + } + w.seen[typ] = false + defer func() { + w.seen[typ] = res + }() + + switch t := typ.(type) { + case nil, *types.Basic: // TODO(gri) should nil be handled here? + break + + case *types.Alias: + if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() { + return true // This is an uninstantiated Alias. + } + // The expansion of an alias can have free type parameters, + // whether or not the alias itself has type parameters: + // + // func _[K comparable]() { + // type Set = map[K]bool // free(Set) = {K} + // type MapTo[V] = map[K]V // free(Map[foo]) = {V} + // } + // + // So, we must Unalias. 
+ return w.Has(types.Unalias(t)) + + case *types.Array: + return w.Has(t.Elem()) + + case *types.Slice: + return w.Has(t.Elem()) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if w.Has(t.Field(i).Type()) { + return true + } + } + + case *types.Pointer: + return w.Has(t.Elem()) + + case *types.Tuple: + n := t.Len() + for i := 0; i < n; i++ { + if w.Has(t.At(i).Type()) { + return true + } + } + + case *types.Signature: + // t.tparams may not be nil if we are looking at a signature + // of a generic function type (or an interface method) that is + // part of the type we're testing. We don't care about these type + // parameters. + // Similarly, the receiver of a method may declare (rather than + // use) type parameters, we don't care about those either. + // Thus, we only need to look at the input and result parameters. + return w.Has(t.Params()) || w.Has(t.Results()) + + case *types.Interface: + for i, n := 0, t.NumMethods(); i < n; i++ { + if w.Has(t.Method(i).Type()) { + return true + } + } + terms, err := InterfaceTermSet(t) + if err != nil { + return false // ill typed + } + for _, term := range terms { + if w.Has(term.Type()) { + return true + } + } + + case *types.Map: + return w.Has(t.Key()) || w.Has(t.Elem()) + + case *types.Chan: + return w.Has(t.Elem()) + + case *types.Named: + args := t.TypeArgs() + if params := t.TypeParams(); params.Len() > args.Len() { + return true // this is an uninstantiated named type. + } + for i, n := 0, args.Len(); i < n; i++ { + if w.Has(args.At(i)) { + return true + } + } + return w.Has(t.Underlying()) // recurse for types local to parameterized functions + + case *types.TypeParam: + return true + + default: + panic(t) // unreachable + } + + return false +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 000000000..93c80fdc9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. 
A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *types.Union) ([]*types.Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*types.Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*types.Term + for _, term := range tset.terms { + terms = append(terms, types.NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. 
+ tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*types.TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *types.Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *types.TypeParam, *types.Union: + // A stand-alone type parameter or union is not permitted as union + // term. + return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *types.TypeParam: + panic("unreachable") + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. +func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go new file mode 100644 index 000000000..cbd12f801 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -0,0 +1,163 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "bytes" + "go/types" +) + +// A termlist represents the type set represented by the union +// t1 βˆͺ y2 βˆͺ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "βˆ…" + } + var buf bytes.Buffer + for i, x := range xl { + if i > 0 { + buf.WriteString(" | ") + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. 
+func (xl termlist) isAll() bool { + // If there's a 𝓀 term, the entire list is 𝓀. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓀 term, the entire list is 𝓀. + // Exit early. + // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓀 term + // and other terms and the result would not be + // in normal form.) + if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// union returns the union xl βˆͺ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y βŠ† xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl βŠ† yl. +func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go new file mode 100644 index 000000000..7350bb702 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -0,0 +1,169 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. 
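// A minimal illustrative sketch of the normalization machinery added above
// (normalize.go/termlist.go), assuming a caller located where this internal
// package is importable: type-check a small generic declaration and ask for
// the normalized structural terms of its type parameter. The source text and
// names below are assumptions for the example only.
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

const src = `package p

type A interface{ ~string | ~[]byte }
type B interface{ int | string }
type T[P interface{ A | B; ~string | ~int }] int
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	named := pkg.Scope().Lookup("T").Type().(*types.Named)
	tparam := named.TypeParams().At(0)

	// Expands and reduces the embedded constraints; for this declaration the
	// normalized restriction is ~string | int (per the doc comment above).
	terms, err := typeparams.StructuralTerms(tparam)
	if err != nil {
		panic(err)
	}
	for _, t := range terms {
		fmt.Println(t) // e.g. ~string, then int
	}
}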
+ +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// βˆ…: (*term)(nil) == βˆ… // set of no types (empty set) +// 𝓀: &term{} == 𝓀 // set of all types (𝓀niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "βˆ…" + case x.typ == nil: + return "𝓀" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. +func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // βˆ… βŠ‚ x, y βŠ‚ 𝓀 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x βˆͺ y: zero, one, or two non-nil terms. +func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // βˆ… βˆͺ βˆ… == βˆ… + case x == nil: + return y, nil // βˆ… βˆͺ y == y + case y == nil: + return x, nil // x βˆͺ βˆ… == x + case x.typ == nil: + return x, nil // 𝓀 βˆͺ y == 𝓀 + case y.typ == nil: + return y, nil // x βˆͺ 𝓀 == 𝓀 + } + // βˆ… βŠ‚ x, y βŠ‚ 𝓀 + + if x.disjoint(y) { + return x, y // x βˆͺ y == (x, y) if x ∩ y == βˆ… + } + // x.typ == y.typ + + // ~t βˆͺ ~t == ~t + // ~t βˆͺ T == ~t + // T βˆͺ ~t == ~t + // T βˆͺ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // βˆ… ∩ y == βˆ… and ∩ βˆ… == βˆ… + case x.typ == nil: + return y // 𝓀 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓀 == x + } + // βˆ… βŠ‚ x, y βŠ‚ 𝓀 + + if x.disjoint(y) { + return nil // x ∩ y == βˆ… if x ∩ y == βˆ… + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. +func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ βˆ… == false + case x.typ == nil: + return true // t ∈ 𝓀 == true + } + // βˆ… βŠ‚ x βŠ‚ 𝓀 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x βŠ† y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // βˆ… βŠ† y == true + case y == nil: + return false // x βŠ† βˆ… == false since x != βˆ… + case y.typ == nil: + return true // x βŠ† 𝓀 == true + case x.typ == nil: + return false // 𝓀 βŠ† y == false since y != 𝓀 + } + // βˆ… βŠ‚ x, y βŠ‚ 𝓀 + + if x.disjoint(y) { + return false // x βŠ† y == false if x ∩ y == βˆ… + } + // x.typ == y.typ + + // ~t βŠ† ~t == true + // ~t βŠ† T == false + // T βŠ† ~t == true + // T βŠ† T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == βˆ…. +// x.typ and y.typ must not be nil. 
+func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go new file mode 100644 index 000000000..4957f0216 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/types/typeutil" +) + +// ForEachElement calls f for type T and each type reachable from its +// type through reflection. It does this by recursively stripping off +// type constructors; in addition, for each named type N, the type *N +// is added to the result as it may have additional methods. +// +// The caller must provide an initially empty set used to de-duplicate +// identical types, potentially across multiple calls to ForEachElement. +// (Its final value holds all the elements seen, matching the arguments +// passed to f.) +// +// TODO(adonovan): share/harmonize with go/callgraph/rta. +func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if seen, _ := rtypes.Set(T, true).(bool); seen { + return // de-dup + } + + f(T) // notify caller of new element type + } + + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). + // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } + + switch T := T.(type) { + case *types.Alias: + visit(types.Unalias(T), skip) // emulates the pre-Alias behavior + + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. 
+ + case *types.Pointer: + visit(T.Elem(), false) + + case *types.Slice: + visit(T.Elem(), false) + + case *types.Chan: + visit(T.Elem(), false) + + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) + + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } + + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. + panic(T) + + default: + panic(T) + } + } + visit(T, false) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 834e05381..131caab28 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -838,7 +838,7 @@ const ( // InvalidCap occurs when an argument to the cap built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -859,7 +859,7 @@ const ( // InvalidCopy occurs when the arguments are not of slice type or do not // have compatible type. // - // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // See https://golang.org/ref/spec#Appending_and_copying_slices for more // information on the type requirements for the copy built-in. // // Example: @@ -897,7 +897,7 @@ const ( // InvalidLen occurs when an argument to the len built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -914,7 +914,7 @@ const ( // InvalidMake occurs when make is called with an unsupported type argument. // - // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // See https://golang.org/ref/spec#Making_slices_maps_and_channels for // information on the types that may be created using make. // // Example: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go new file mode 100644 index 000000000..b64f714eb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -0,0 +1,46 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
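// A minimal illustrative sketch of ForEachElement from element.go above,
// assuming a caller located where this internal package is importable; the
// concrete type walked here is an assumption for the example only.
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
	"golang.org/x/tools/internal/typesinternal"
)

func main() {
	var (
		rtypes typeutil.Map            // de-duplication set, shared across calls
		msets  typeutil.MethodSetCache // method-set cache
	)

	// map[string][]int: the callback should see the map type itself,
	// then string, []int, and int as reachable element types.
	T := types.NewMap(types.Typ[types.String], types.NewSlice(types.Typ[types.Int]))
	typesinternal.ForEachElement(&rtypes, &msets, T, func(t types.Type) {
		fmt.Println(t)
	})
}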
+ +package typesinternal + +import ( + "go/ast" + "go/types" + "strconv" +) + +// FileQualifier returns a [types.Qualifier] function that qualifies +// imported symbols appropriately based on the import environment of a given +// file. +// If the same package is imported multiple times, the last appearance is +// recorded. +func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier { + // Construct mapping of import paths to their defined names. + // It is only necessary to look at renaming imports. + imports := make(map[string]string) + for _, imp := range f.Imports { + if imp.Name != nil && imp.Name.Name != "_" { + path, _ := strconv.Unquote(imp.Path.Value) + imports[path] = imp.Name.Name + } + } + + // Define qualifier to replace full package paths with names of the imports. + return func(p *types.Package) string { + if p == nil || p == pkg { + return "" + } + + if name, ok := imports[p.Path()]; ok { + if name == "." { + return "" + } else { + return name + } + } + + // If there is no local renaming, fall back to the package name. + return p.Name() + } +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index fea7c8b75..e54accc69 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -6,20 +6,20 @@ package typesinternal import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // ReceiverNamed returns the named type (if any) associated with the // type of recv, which may be of the form N or *N, or aliases thereof. // It also reports whether a Pointer was present. +// +// The named result may be nil in ill-typed code. func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { isPtr = true t = ptr.Elem() } - named, _ = aliases.Unalias(t).(*types.Named) + named, _ = types.Unalias(t).(*types.Named) return } @@ -36,7 +36,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { // indirection from the type, regardless of named types (analogous to // a LOAD instruction). func Unpointer(t types.Type) types.Type { - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { return ptr.Elem() } return t diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 7c77c2fbc..a93d51f98 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,6 +11,8 @@ import ( "go/types" "reflect" "unsafe" + + "golang.org/x/tools/internal/aliases" ) func SetUsesCgo(conf *types.Config) bool { @@ -48,3 +50,73 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true } + +// NameRelativeTo returns a types.Qualifier that qualifies members of +// all packages other than pkg, using only the package name. +// (By contrast, [types.RelativeTo] uses the complete package path, +// which is often excessive.) +// +// If pkg is nil, it is equivalent to [*types.Package.Name]. 
+func NameRelativeTo(pkg *types.Package) types.Qualifier { + return func(other *types.Package) string { + if pkg != nil && pkg == other { + return "" // same package; unqualified + } + return other.Name() + } +} + +// A NamedOrAlias is a [types.Type] that is named (as +// defined by the spec) and capable of bearing type parameters: it +// abstracts aliases ([types.Alias]) and defined types +// ([types.Named]). +// +// Every type declared by an explicit "type" declaration is a +// NamedOrAlias. (Built-in type symbols may additionally +// have type [types.Basic], which is not a NamedOrAlias, +// though the spec regards them as "named".) +// +// NamedOrAlias cannot expose the Origin method, because +// [types.Alias.Origin] and [types.Named.Origin] have different +// (covariant) result types; use [Origin] instead. +type NamedOrAlias interface { + types.Type + Obj() *types.TypeName + // TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22. +} + +// TypeParams is a light shim around t.TypeParams(). +// (go/types.Alias).TypeParams requires >= 1.23. +func TypeParams(t NamedOrAlias) *types.TypeParamList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeParams(t) + case *types.Named: + return t.TypeParams() + } + return nil +} + +// TypeArgs is a light shim around t.TypeArgs(). +// (go/types.Alias).TypeArgs requires >= 1.23. +func TypeArgs(t NamedOrAlias) *types.TypeList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeArgs(t) + case *types.Named: + return t.TypeArgs() + } + return nil +} + +// Origin returns the generic type of the Named or Alias type t if it +// is instantiated, otherwise it returns t. +func Origin(t NamedOrAlias) NamedOrAlias { + switch t := t.(type) { + case *types.Alias: + return aliases.Origin(t) + case *types.Named: + return t.Origin() + } + return t +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go new file mode 100644 index 000000000..d272949c1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -0,0 +1,392 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "strings" +) + +// ZeroString returns the string representation of the zero value for any type t. +// The boolean result indicates whether the type is or contains an invalid type +// or a non-basic (constraint) interface type. +// +// Even for invalid input types, ZeroString may return a partially correct +// string representation. The caller should use the returned isValid boolean +// to determine the validity of the expression. +// +// When assigning to a wider type (such as 'any'), it's the caller's +// responsibility to handle any necessary type conversions. +// +// This string can be used on the right-hand side of an assignment where the +// left-hand side has that explicit type. +// References to named types are qualified by an appropriate (optional) +// qualifier function. +// Exception: This does not apply to tuples. Their string representation is +// informational only and cannot be used in an assignment. +// +// See [ZeroExpr] for a variant that returns an [ast.Expr]. 
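+//
+// For example, assuming qual is nil:
+//
+//	ZeroString(types.Typ[types.Int], nil)                  // "0", true
+//	ZeroString(types.Typ[types.String], nil)               // `""`, true
+//	ZeroString(types.NewSlice(types.Typ[types.Int]), nil)  // "nil", true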
+func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return "false", true + case t.Info()&types.IsNumeric != 0: + return "0", true + case t.Info()&types.IsString != 0: + return `""`, true + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return "nil", true + case t.Kind() == types.Invalid: + return "invalid", false + default: + panic(fmt.Sprintf("ZeroString for unexpected type %v", t)) + } + + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return "nil", true + + case *types.Interface: + if !t.IsMethodSet() { + return "invalid", false + } + return "nil", true + + case *types.Named: + switch under := t.Underlying().(type) { + case *types.Struct, *types.Array: + return types.TypeString(t, qual) + "{}", true + default: + return ZeroString(under, qual) + } + + case *types.Alias: + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return types.TypeString(t, qual) + "{}", true + default: + // A type parameter can have alias but alias type's underlying type + // can never be a type parameter. + // Use types.Unalias to preserve the info of type parameter instead + // of call Underlying() going right through and get the underlying + // type of the type parameter which is always an interface. + return ZeroString(types.Unalias(t), qual) + } + + case *types.Array, *types.Struct: + return types.TypeString(t, qual) + "{}", true + + case *types.TypeParam: + // Assumes func new is not shadowed. + return "*new(" + types.TypeString(t, qual) + ")", true + + case *types.Tuple: + // Tuples are not normal values. + // We are currently format as "(t[0], ..., t[n])". Could be something else. + isValid := true + components := make([]string, t.Len()) + for i := 0; i < t.Len(); i++ { + comp, ok := ZeroString(t.At(i).Type(), qual) + + components[i] = comp + isValid = isValid && ok + } + return "(" + strings.Join(components, ", ") + ")", isValid + + case *types.Union: + // Variables of these types cannot be created, so it makes + // no sense to ask for their zero value. + panic(fmt.Sprintf("invalid type for a variable: %v", t)) + + default: + panic(t) // unreachable. + } +} + +// ZeroExpr returns the ast.Expr representation of the zero value for any type t. +// The boolean result indicates whether the type is or contains an invalid type +// or a non-basic (constraint) interface type. +// +// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr +// representation. The caller should use the returned isValid boolean to determine +// the validity of the expression. +// +// This function is designed for types suitable for variables and should not be +// used with Tuple or Union types.References to named types are qualified by an +// appropriate (optional) qualifier function. +// +// See [ZeroString] for a variant that returns a string. 
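+//
+// For example, ZeroExpr(types.Typ[types.Bool], nil) yields the identifier
+// "false", and ZeroExpr on any pointer, slice, channel, map, or function
+// type yields the identifier "nil".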
+func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return &ast.Ident{Name: "false"}, true + case t.Info()&types.IsNumeric != 0: + return &ast.BasicLit{Kind: token.INT, Value: "0"}, true + case t.Info()&types.IsString != 0: + return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return ast.NewIdent("nil"), true + case t.Kind() == types.Invalid: + return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false + default: + panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t)) + } + + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return ast.NewIdent("nil"), true + + case *types.Interface: + if !t.IsMethodSet() { + return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false + } + return ast.NewIdent("nil"), true + + case *types.Named: + switch under := t.Underlying().(type) { + case *types.Struct, *types.Array: + return &ast.CompositeLit{ + Type: TypeExpr(t, qual), + }, true + default: + return ZeroExpr(under, qual) + } + + case *types.Alias: + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return &ast.CompositeLit{ + Type: TypeExpr(t, qual), + }, true + default: + return ZeroExpr(types.Unalias(t), qual) + } + + case *types.Array, *types.Struct: + return &ast.CompositeLit{ + Type: TypeExpr(t, qual), + }, true + + case *types.TypeParam: + return &ast.StarExpr{ // *new(T) + X: &ast.CallExpr{ + // Assumes func new is not shadowed. + Fun: ast.NewIdent("new"), + Args: []ast.Expr{ + ast.NewIdent(t.Obj().Name()), + }, + }, + }, true + + case *types.Tuple: + // Unlike ZeroString, there is no ast.Expr can express tuple by + // "(t[0], ..., t[n])". + panic(fmt.Sprintf("invalid type for a variable: %v", t)) + + case *types.Union: + // Variables of these types cannot be created, so it makes + // no sense to ask for their zero value. + panic(fmt.Sprintf("invalid type for a variable: %v", t)) + + default: + panic(t) // unreachable. + } +} + +// IsZeroExpr uses simple syntactic heuristics to report whether expr +// is a obvious zero value, such as 0, "", nil, or false. +// It cannot do better without type information. +func IsZeroExpr(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return e.Value == "0" || e.Value == `""` + case *ast.Ident: + return e.Name == "nil" || e.Name == "false" + default: + return false + } +} + +// TypeExpr returns syntax for the specified type. References to named types +// are qualified by an appropriate (optional) qualifier function. +// It may panic for types such as Tuple or Union. 
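+//
+// For example, for the type map[string]int, TypeExpr returns an
+// *ast.MapType whose Key and Value fields are the identifiers "string"
+// and "int".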
+func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { + switch t := t.(type) { + case *types.Basic: + switch t.Kind() { + case types.UnsafePointer: + return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")} + default: + return ast.NewIdent(t.Name()) + } + + case *types.Pointer: + return &ast.UnaryExpr{ + Op: token.MUL, + X: TypeExpr(t.Elem(), qual), + } + + case *types.Array: + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: TypeExpr(t.Elem(), qual), + } + + case *types.Slice: + return &ast.ArrayType{ + Elt: TypeExpr(t.Elem(), qual), + } + + case *types.Map: + return &ast.MapType{ + Key: TypeExpr(t.Key(), qual), + Value: TypeExpr(t.Elem(), qual), + } + + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + return &ast.ChanType{ + Dir: dir, + Value: TypeExpr(t.Elem(), qual), + } + + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + params = append(params, &ast.Field{ + Type: TypeExpr(t.Params().At(i).Type(), qual), + Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + if t.Variadic() { + last := params[len(params)-1] + last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + returns = append(returns, &ast.Field{ + Type: TypeExpr(t.Results().At(i).Type(), qual), + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } + + case *types.TypeParam: + pkgName := qual(t.Obj().Pkg()) + if pkgName == "" || t.Obj().Pkg() == nil { + return ast.NewIdent(t.Obj().Name()) + } + return &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: ast.NewIdent(t.Obj().Name()), + } + + // types.TypeParam also implements interface NamedOrAlias. To differentiate, + // case TypeParam need to be present before case NamedOrAlias. + // TODO(hxjiang): remove this comment once TypeArgs() is added to interface + // NamedOrAlias. + case NamedOrAlias: + var expr ast.Expr = ast.NewIdent(t.Obj().Name()) + if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" { + expr = &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: expr.(*ast.Ident), + } + } + + // TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to + // typesinternal.NamedOrAlias. + if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok { + if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 { + var indices []ast.Expr + for i := range typeArgs.Len() { + indices = append(indices, TypeExpr(typeArgs.At(i), qual)) + } + expr = &ast.IndexListExpr{ + X: expr, + Indices: indices, + } + } + } + + return expr + + case *types.Struct: + return ast.NewIdent(t.String()) + + case *types.Interface: + return ast.NewIdent(t.String()) + + case *types.Union: + if t.Len() == 0 { + panic("Union type should have at least one term") + } + // Same as go/ast, the return expression will put last term in the + // Y field at topmost level of BinaryExpr. 
+ // For union of type "float32 | float64 | int64", the structure looks + // similar to: + // { + // X: { + // X: float32, + // Op: | + // Y: float64, + // } + // Op: |, + // Y: int64, + // } + var union ast.Expr + for i := range t.Len() { + term := t.Term(i) + termExpr := TypeExpr(term.Type(), qual) + if term.Tilde() { + termExpr = &ast.UnaryExpr{ + Op: token.TILDE, + X: termExpr, + } + } + if i == 0 { + union = termExpr + } else { + union = &ast.BinaryExpr{ + X: union, + Op: token.OR, + Y: termExpr, + } + } + } + return union + + case *types.Tuple: + panic("invalid input type types.Tuple") + + default: + panic("unreachable") + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go deleted file mode 100644 index 377bf7a53..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -// toolchain is maximum version (<1.22) that the go toolchain used -// to build the current tool is known to support. -// -// When a tool is built with >=1.22, the value of toolchain is unused. -// -// x/tools does not support building with go <1.18. So we take this -// as the minimum possible maximum. -var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go deleted file mode 100644 index 1a9efa126..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 -// +build go1.20 - -package versions - -func init() { - if Compare(toolchain, Go1_20) < 0 { - toolchain = Go1_20 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go index 562eef21f..0fc10ce4e 100644 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -5,15 +5,29 @@ package versions import ( + "go/ast" "go/types" ) -// GoVersion returns the Go version of the type package. -// It returns zero if no version can be determined. -func GoVersion(pkg *types.Package) string { - // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. - if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { - return pkg.GoVersion() +// FileVersion returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. +func FileVersion(info *types.Info, file *ast.File) string { + // In tools built with Go >= 1.22, the Go version of a file + // follow a cascades of sources: + // 1) types.Info.FileVersion, which follows the cascade: + // 1.a) file version (ast.File.GoVersion), + // 1.b) the package version (types.Config.GoVersion), or + // 2) is some unknown Future version. + // + // File versions require a valid package version to be provided to types + // in Config.GoVersion. Config.GoVersion is either from the package's module + // or the toolchain (go run). This value should be provided by go/packages + // or unitchecker.Config.GoVersion. 
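+	//
+	// For example, when types.Config.GoVersion is "go1.22" and the file has
+	// no per-file version, FileVersion reports "go1.22"; when FileVersions
+	// has no valid entry for the file, it reports the unknown Future version.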
+ if v := info.FileVersions[file]; IsValid(v) { + return v } - return "" + // Note: we could instead return runtime.Version() [if valid]. + // This would act as a max version on what a tool can support. + return Future } diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go deleted file mode 100644 index b4345d334..000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a language version (<=1.21) derived from runtime.Version() -// or an unknown future version. -func FileVersion(info *types.Info, file *ast.File) string { - // In x/tools built with Go <= 1.21, we do not have Info.FileVersions - // available. We use a go version derived from the toolchain used to - // compile the tool by default. - // This will be <= go1.21. We take this as the maximum version that - // this tool can support. - // - // There are no features currently in x/tools that need to tell fine grained - // differences for versions <1.22. - return toolchain -} - -// InitFileVersions is a noop when compiled with this Go version. -func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go deleted file mode 100644 index e8180632a..000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersions returns a file's Go version. -// The reported version is an unknown Future version if a -// version cannot be determined. -func FileVersion(info *types.Info, file *ast.File) string { - // In tools built with Go >= 1.22, the Go version of a file - // follow a cascades of sources: - // 1) types.Info.FileVersion, which follows the cascade: - // 1.a) file version (ast.File.GoVersion), - // 1.b) the package version (types.Config.GoVersion), or - // 2) is some unknown Future version. - // - // File versions require a valid package version to be provided to types - // in Config.GoVersion. Config.GoVersion is either from the package's module - // or the toolchain (go run). This value should be provided by go/packages - // or unitchecker.Config.GoVersion. - if v := info.FileVersions[file]; IsValid(v) { - return v - } - // Note: we could instead return runtime.Version() [if valid]. - // This would act as a max version on what a tool can support. - return Future -} - -// InitFileVersions initializes info to record Go versions for Go files. 
-func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml index 055480b9e..7348c50c0 100644 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -11,6 +11,7 @@ go: - "1.11.x" - "1.12.x" - "1.13.x" + - "1.14.x" - "tip" go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index d2c2308f1..acf71402c 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -79,6 +79,8 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { parser.encoding = encoding } +var disableLineWrapping = false + // Create a new emitter object. func yaml_emitter_initialize(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{ @@ -86,7 +88,9 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), - best_width: -1, + } + if disableLineWrapping { + emitter.best_width = -1 } } diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index 89650e293..30813884c 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -175,7 +175,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -464,3 +464,15 @@ func isZero(v reflect.Value) bool { } return false } + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. 
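+//
+// For example, calling yaml.FutureLineWrap() once before the first call to
+// Marshal keeps long strings on a single line in all subsequent encodings
+// produced by this package.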
+func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c722fe5a8..f72f2b538 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,5 +1,5 @@ -# github.com/BurntSushi/toml v1.2.1 -## explicit; go 1.16 +# github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c +## explicit; go 1.18 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal # github.com/Kunde21/markdownfmt/v3 v3.1.0 @@ -38,7 +38,7 @@ github.com/ProtonMail/go-crypto/openpgp/packet github.com/ProtonMail/go-crypto/openpgp/s2k github.com/ProtonMail/go-crypto/openpgp/x25519 github.com/ProtonMail/go-crypto/openpgp/x448 -# github.com/agext/levenshtein v1.2.3 +# github.com/agext/levenshtein v1.2.2 ## explicit github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 @@ -214,9 +214,14 @@ github.com/cloudflare/circl/sign/ed448 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/fastly/go-fastly/v9 v9.12.0 +# github.com/dnaeon/go-vcr v1.2.0 +## explicit; go 1.15 +github.com/dnaeon/go-vcr/cassette +github.com/dnaeon/go-vcr/recorder +# github.com/fastly/go-fastly/v9 v9.13.0 ## explicit; go 1.20 github.com/fastly/go-fastly/v9/fastly +github.com/fastly/go-fastly/v9/fastly/domains/v1 # github.com/fatih/color v1.16.0 ## explicit; go 1.17 github.com/fatih/color @@ -402,8 +407,8 @@ github.com/mitchellh/copystructure # github.com/mitchellh/go-testing-interface v1.14.1 ## explicit; go 1.14 github.com/mitchellh/go-testing-interface -# github.com/mitchellh/go-wordwrap v1.0.1 -## explicit; go 1.14 +# github.com/mitchellh/go-wordwrap v1.0.0 +## explicit github.com/mitchellh/go-wordwrap # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 @@ -411,8 +416,8 @@ github.com/mitchellh/mapstructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk -# github.com/oklog/run v1.1.0 -## explicit; go 1.13 +# github.com/oklog/run v1.0.0 +## explicit github.com/oklog/run # github.com/peterhellberg/link v1.2.0 ## explicit; go 1.17 @@ -476,7 +481,7 @@ github.com/zclconf/go-cty/cty/set # go.abhg.dev/goldmark/frontmatter v0.2.0 ## explicit; go 1.20 go.abhg.dev/goldmark/frontmatter -# golang.org/x/crypto v0.29.0 +# golang.org/x/crypto v0.32.0 ## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -500,13 +505,13 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/mod v0.17.0 -## explicit; go 1.18 +# golang.org/x/mod v0.22.0 +## explicit; go 1.22.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.31.0 +# golang.org/x/net v0.34.0 ## explicit; go 1.18 golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -514,15 +519,15 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/sync v0.9.0 +# golang.org/x/sync v0.10.0 ## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.27.0 +# golang.org/x/sys v0.29.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.20.0 +# golang.org/x/text v0.21.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/internal @@ -534,10 +539,12 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools 
v0.21.1-0.20240508182429-e35e4ccd0d2d -## explicit; go 1.19 +# golang.org/x/tools v0.29.0 +## explicit; go 1.22.0 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/analysis +golang.org/x/tools/go/analysis/checker +golang.org/x/tools/go/analysis/internal golang.org/x/tools/go/analysis/internal/analysisflags golang.org/x/tools/go/analysis/internal/checker golang.org/x/tools/go/analysis/multichecker @@ -545,9 +552,9 @@ golang.org/x/tools/go/analysis/passes/inspect golang.org/x/tools/go/analysis/unitchecker golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath +golang.org/x/tools/go/types/typeutil golang.org/x/tools/internal/aliases golang.org/x/tools/internal/analysisinternal golang.org/x/tools/internal/diff @@ -563,7 +570,7 @@ golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/robustio golang.org/x/tools/internal/stdlib -golang.org/x/tools/internal/tokeninternal +golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions # google.golang.org/appengine v1.6.8 @@ -679,8 +686,8 @@ google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb google.golang.org/protobuf/types/known/timestamppb -# gopkg.in/yaml.v2 v2.3.0 -## explicit +# gopkg.in/yaml.v2 v2.4.0 +## explicit; go 1.15 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit